blob: e58b9034afffe754c67fad033b03ff4f34a44765 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
Heiner Kallweit6d5d2ee2016-01-08 19:28:58 +010043#include "leds.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020044
Marcel Holtmannb78752c2010-08-08 23:06:53 -040045static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020046static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020047static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070048
Linus Torvalds1da177e2005-04-16 15:20:36 -070049/* HCI device list */
50LIST_HEAD(hci_dev_list);
51DEFINE_RWLOCK(hci_dev_list_lock);
52
53/* HCI callback list */
54LIST_HEAD(hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +020055DEFINE_MUTEX(hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
Sasha Levin3df92b32012-05-27 22:36:56 +020057/* HCI ID Numbering */
58static DEFINE_IDA(hci_index_ida);
59
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070060/* ---- HCI debugfs entries ---- */
61
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070062static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64{
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
Prasanna Karthik74b93e92015-11-18 12:38:41 +000068 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070069 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72}
73
74static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos)
76{
77 struct hci_dev *hdev = file->private_data;
78 struct sk_buff *skb;
79 char buf[32];
80 size_t buf_size = min(count, (sizeof(buf)-1));
81 bool enable;
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070082
83 if (!test_bit(HCI_UP, &hdev->flags))
84 return -ENETDOWN;
85
86 if (copy_from_user(buf, user_buf, buf_size))
87 return -EFAULT;
88
89 buf[buf_size] = '\0';
90 if (strtobool(buf, &enable))
91 return -EINVAL;
92
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -070093 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070094 return -EALREADY;
95
Johan Hedbergb5044302015-11-10 09:44:55 +020096 hci_req_sync_lock(hdev);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070097 if (enable)
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 HCI_CMD_TIMEOUT);
100 else
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 HCI_CMD_TIMEOUT);
Johan Hedbergb5044302015-11-10 09:44:55 +0200103 hci_req_sync_unlock(hdev);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700104
105 if (IS_ERR(skb))
106 return PTR_ERR(skb);
107
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700108 kfree_skb(skb);
109
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -0700110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700111
112 return count;
113}
114
115static const struct file_operations dut_mode_fops = {
116 .open = simple_open,
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
120};
121
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200122static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
124{
125 struct hci_dev *hdev = file->private_data;
126 char buf[3];
127
Prasanna Karthik74b93e92015-11-18 12:38:41 +0000128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200129 buf[1] = '\n';
130 buf[2] = '\0';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132}
133
134static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
136{
137 struct hci_dev *hdev = file->private_data;
138 char buf[32];
139 size_t buf_size = min(count, (sizeof(buf)-1));
140 bool enable;
141 int err;
142
143 if (copy_from_user(buf, user_buf, buf_size))
144 return -EFAULT;
145
146 buf[buf_size] = '\0';
147 if (strtobool(buf, &enable))
148 return -EINVAL;
149
Marcel Holtmann7e995b92015-10-17 16:00:26 +0200150 /* When the diagnostic flags are not persistent and the transport
151 * is not active, then there is no need for the vendor callback.
152 *
153 * Instead just store the desired value. If needed the setting
154 * will be programmed when the controller gets powered on.
155 */
156 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
157 !test_bit(HCI_RUNNING, &hdev->flags))
158 goto done;
159
Johan Hedbergb5044302015-11-10 09:44:55 +0200160 hci_req_sync_lock(hdev);
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200161 err = hdev->set_diag(hdev, enable);
Johan Hedbergb5044302015-11-10 09:44:55 +0200162 hci_req_sync_unlock(hdev);
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200163
164 if (err < 0)
165 return err;
166
Marcel Holtmann7e995b92015-10-17 16:00:26 +0200167done:
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200168 if (enable)
169 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
170 else
171 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
172
173 return count;
174}
175
176static const struct file_operations vendor_diag_fops = {
177 .open = simple_open,
178 .read = vendor_diag_read,
179 .write = vendor_diag_write,
180 .llseek = default_llseek,
181};
182
Marcel Holtmannf640ee92015-10-08 12:35:42 +0200183static void hci_debugfs_create_basic(struct hci_dev *hdev)
184{
185 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
186 &dut_mode_fops);
187
188 if (hdev->set_diag)
189 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
190 &vendor_diag_fops);
191}
192
Johan Hedberga1d01db2015-11-11 08:11:25 +0200193static int hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200195 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196
197 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200198 set_bit(HCI_RESET, &req->hdev->flags);
199 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200200 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201}
202
Johan Hedberg42c6b122013-03-05 20:37:49 +0200203static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200205 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200206
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200208 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200210 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200211 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200212
213 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200214 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215}
216
Johan Hedberg0af801b2015-02-17 15:05:21 +0200217static void amp_init1(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200218{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200219 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200220
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200221 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200222 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300223
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700224 /* Read Local Supported Commands */
225 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
226
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300227 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200228 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300229
230 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200231 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700232
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700233 /* Read Flow Control Mode */
234 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
235
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700236 /* Read Location Data */
237 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200238}
239
Johan Hedberga1d01db2015-11-11 08:11:25 +0200240static int amp_init2(struct hci_request *req)
Johan Hedberg0af801b2015-02-17 15:05:21 +0200241{
242 /* Read Local Supported Features. Not all AMP controllers
243 * support this so it's placed conditionally in the second
244 * stage init.
245 */
246 if (req->hdev->commands[14] & 0x20)
247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200248
249 return 0;
Johan Hedberg0af801b2015-02-17 15:05:21 +0200250}
251
Johan Hedberga1d01db2015-11-11 08:11:25 +0200252static int hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200253{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200254 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200255
256 BT_DBG("%s %ld", hdev->name, opt);
257
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300258 /* Reset */
259 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200260 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300261
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200262 switch (hdev->dev_type) {
Marcel Holtmannca8bee52016-07-05 14:30:14 +0200263 case HCI_PRIMARY:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200264 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200265 break;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200266 case HCI_AMP:
Johan Hedberg0af801b2015-02-17 15:05:21 +0200267 amp_init1(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200268 break;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200269 default:
270 BT_ERR("Unknown device type %d", hdev->dev_type);
271 break;
272 }
Johan Hedberga1d01db2015-11-11 08:11:25 +0200273
274 return 0;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200275}
276
Johan Hedberg42c6b122013-03-05 20:37:49 +0200277static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200278{
Johan Hedberg2177bab2013-03-05 20:37:43 +0200279 __le16 param;
280 __u8 flt_type;
281
282 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200283 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200284
285 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200286 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200287
288 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200289 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200290
291 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200292 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200293
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700294 /* Read Number of Supported IAC */
295 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
296
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700297 /* Read Current IAC LAP */
298 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
299
Johan Hedberg2177bab2013-03-05 20:37:43 +0200300 /* Clear Event Filters */
301 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200302 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200303
304 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700305 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200306 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200307}
308
Johan Hedberg42c6b122013-03-05 20:37:49 +0200309static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200310{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300311 struct hci_dev *hdev = req->hdev;
312
Johan Hedberg2177bab2013-03-05 20:37:43 +0200313 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200314 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200315
316 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200317 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200318
Marcel Holtmann747d3f02014-02-27 20:37:29 -0800319 /* Read LE Supported States */
320 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
321
Johan Hedbergc73eee92013-04-19 18:35:21 +0300322 /* LE-only controllers have LE implicitly enabled */
323 if (!lmp_bredr_capable(hdev))
Marcel Holtmanna1536da2015-03-13 02:11:01 -0700324 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200325}
326
Johan Hedberg42c6b122013-03-05 20:37:49 +0200327static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200328{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200329 struct hci_dev *hdev = req->hdev;
330
Johan Hedberg2177bab2013-03-05 20:37:43 +0200331 /* The second byte is 0xff instead of 0x9f (two reserved bits
332 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
333 * command otherwise.
334 */
335 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
336
337 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
338 * any event mask for pre 1.2 devices.
339 */
340 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
341 return;
342
343 if (lmp_bredr_capable(hdev)) {
344 events[4] |= 0x01; /* Flow Specification Complete */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700345 } else {
346 /* Use a different default for LE-only devices */
347 memset(events, 0, sizeof(events));
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700348 events[1] |= 0x20; /* Command Complete */
349 events[1] |= 0x40; /* Command Status */
350 events[1] |= 0x80; /* Hardware Error */
Marcel Holtmann5c3d3b42015-11-04 07:17:23 +0100351
352 /* If the controller supports the Disconnect command, enable
353 * the corresponding event. In addition enable packet flow
354 * control related events.
355 */
356 if (hdev->commands[0] & 0x20) {
357 events[0] |= 0x10; /* Disconnection Complete */
358 events[2] |= 0x04; /* Number of Completed Packets */
359 events[3] |= 0x02; /* Data Buffer Overflow */
360 }
361
362 /* If the controller supports the Read Remote Version
363 * Information command, enable the corresponding event.
364 */
365 if (hdev->commands[2] & 0x80)
366 events[1] |= 0x08; /* Read Remote Version Information
367 * Complete
368 */
Marcel Holtmann0da71f12014-07-12 23:36:16 +0200369
370 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
371 events[0] |= 0x80; /* Encryption Change */
372 events[5] |= 0x80; /* Encryption Key Refresh Complete */
373 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200374 }
375
Marcel Holtmann9fe759c2015-11-01 09:45:22 +0100376 if (lmp_inq_rssi_capable(hdev) ||
377 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
Johan Hedberg2177bab2013-03-05 20:37:43 +0200378 events[4] |= 0x02; /* Inquiry Result with RSSI */
379
Marcel Holtmann70f56aa2015-11-01 09:39:49 +0100380 if (lmp_ext_feat_capable(hdev))
381 events[4] |= 0x04; /* Read Remote Extended Features Complete */
382
383 if (lmp_esco_capable(hdev)) {
384 events[5] |= 0x08; /* Synchronous Connection Complete */
385 events[5] |= 0x10; /* Synchronous Connection Changed */
386 }
387
Johan Hedberg2177bab2013-03-05 20:37:43 +0200388 if (lmp_sniffsubr_capable(hdev))
389 events[5] |= 0x20; /* Sniff Subrating */
390
391 if (lmp_pause_enc_capable(hdev))
392 events[5] |= 0x80; /* Encryption Key Refresh Complete */
393
394 if (lmp_ext_inq_capable(hdev))
395 events[5] |= 0x40; /* Extended Inquiry Result */
396
397 if (lmp_no_flush_capable(hdev))
398 events[7] |= 0x01; /* Enhanced Flush Complete */
399
400 if (lmp_lsto_capable(hdev))
401 events[6] |= 0x80; /* Link Supervision Timeout Changed */
402
403 if (lmp_ssp_capable(hdev)) {
404 events[6] |= 0x01; /* IO Capability Request */
405 events[6] |= 0x02; /* IO Capability Response */
406 events[6] |= 0x04; /* User Confirmation Request */
407 events[6] |= 0x08; /* User Passkey Request */
408 events[6] |= 0x10; /* Remote OOB Data Request */
409 events[6] |= 0x20; /* Simple Pairing Complete */
410 events[7] |= 0x04; /* User Passkey Notification */
411 events[7] |= 0x08; /* Keypress Notification */
412 events[7] |= 0x10; /* Remote Host Supported
413 * Features Notification
414 */
415 }
416
417 if (lmp_le_capable(hdev))
418 events[7] |= 0x20; /* LE Meta-Event */
419
Johan Hedberg42c6b122013-03-05 20:37:49 +0200420 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200421}
422
Johan Hedberga1d01db2015-11-11 08:11:25 +0200423static int hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200424{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200425 struct hci_dev *hdev = req->hdev;
426
Johan Hedberg0af801b2015-02-17 15:05:21 +0200427 if (hdev->dev_type == HCI_AMP)
428 return amp_init2(req);
429
Johan Hedberg2177bab2013-03-05 20:37:43 +0200430 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200431 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +0300432 else
Marcel Holtmanna358dc12015-03-13 02:11:02 -0700433 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200434
435 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200436 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200437
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100438 /* All Bluetooth 1.2 and later controllers should support the
439 * HCI command for reading the local supported commands.
440 *
441 * Unfortunately some controllers indicate Bluetooth 1.2 support,
442 * but do not have support for this command. If that is the case,
443 * the driver can quirk the behavior and skip reading the local
444 * supported commands.
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300445 */
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100446 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
447 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200448 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200449
450 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -0700451 /* When SSP is available, then the host features page
452 * should also be available as well. However some
453 * controllers list the max_page as 0 as long as SSP
454 * has not been enabled. To achieve proper debugging
455 * output, force the minimum max_page to 1 at least.
456 */
457 hdev->max_page = 0x01;
458
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700459 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200460 u8 mode = 0x01;
Marcel Holtmann574ea3c2015-01-22 11:15:20 -0800461
Johan Hedberg42c6b122013-03-05 20:37:49 +0200462 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
463 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200464 } else {
465 struct hci_cp_write_eir cp;
466
467 memset(hdev->eir, 0, sizeof(hdev->eir));
468 memset(&cp, 0, sizeof(cp));
469
Johan Hedberg42c6b122013-03-05 20:37:49 +0200470 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200471 }
472 }
473
Marcel Holtmann043ec9b2015-01-02 23:35:19 -0800474 if (lmp_inq_rssi_capable(hdev) ||
475 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
Marcel Holtmann04422da2015-01-02 23:35:18 -0800476 u8 mode;
477
478 /* If Extended Inquiry Result events are supported, then
479 * they are clearly preferred over Inquiry Result with RSSI
480 * events.
481 */
482 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
483
484 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
485 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200486
487 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200488 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200489
490 if (lmp_ext_feat_capable(hdev)) {
491 struct hci_cp_read_local_ext_features cp;
492
493 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200494 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
495 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200496 }
497
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700498 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200499 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200500 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
501 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200502 }
Johan Hedberga1d01db2015-11-11 08:11:25 +0200503
504 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200505}
506
Johan Hedberg42c6b122013-03-05 20:37:49 +0200507static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200508{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200509 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200510 struct hci_cp_write_def_link_policy cp;
511 u16 link_policy = 0;
512
513 if (lmp_rswitch_capable(hdev))
514 link_policy |= HCI_LP_RSWITCH;
515 if (lmp_hold_capable(hdev))
516 link_policy |= HCI_LP_HOLD;
517 if (lmp_sniff_capable(hdev))
518 link_policy |= HCI_LP_SNIFF;
519 if (lmp_park_capable(hdev))
520 link_policy |= HCI_LP_PARK;
521
522 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200523 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200524}
525
Johan Hedberg42c6b122013-03-05 20:37:49 +0200526static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200527{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200528 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200529 struct hci_cp_write_le_host_supported cp;
530
Johan Hedbergc73eee92013-04-19 18:35:21 +0300531 /* LE-only devices do not support explicit enablement */
532 if (!lmp_bredr_capable(hdev))
533 return;
534
Johan Hedberg2177bab2013-03-05 20:37:43 +0200535 memset(&cp, 0, sizeof(cp));
536
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700537 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200538 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +0200539 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200540 }
541
542 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200543 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
544 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200545}
546
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300547static void hci_set_event_mask_page_2(struct hci_request *req)
548{
549 struct hci_dev *hdev = req->hdev;
550 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
551
552 /* If Connectionless Slave Broadcast master role is supported
553 * enable all necessary events for it.
554 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800555 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300556 events[1] |= 0x40; /* Triggered Clock Capture */
557 events[1] |= 0x80; /* Synchronization Train Complete */
558 events[2] |= 0x10; /* Slave Page Response Timeout */
559 events[2] |= 0x20; /* CSB Channel Map Change */
560 }
561
562 /* If Connectionless Slave Broadcast slave role is supported
563 * enable all necessary events for it.
564 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800565 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300566 events[2] |= 0x01; /* Synchronization Train Received */
567 events[2] |= 0x02; /* CSB Receive */
568 events[2] |= 0x04; /* CSB Timeout */
569 events[2] |= 0x08; /* Truncated Page Complete */
570 }
571
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800572 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +0200573 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800574 events[2] |= 0x80;
575
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300576 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
577}
578
Johan Hedberga1d01db2015-11-11 08:11:25 +0200579static int hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200580{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200581 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300582 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200583
Marcel Holtmann0da71f12014-07-12 23:36:16 +0200584 hci_setup_event_mask(req);
585
Johan Hedberge81be902015-08-30 21:47:20 +0300586 if (hdev->commands[6] & 0x20 &&
587 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Marcel Holtmann48ce62c2015-01-12 09:21:26 -0800588 struct hci_cp_read_stored_link_key cp;
589
590 bacpy(&cp.bdaddr, BDADDR_ANY);
591 cp.read_all = 0x01;
592 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
593 }
594
Johan Hedberg2177bab2013-03-05 20:37:43 +0200595 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200596 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200597
Marcel Holtmann417287d2014-12-11 20:21:54 +0100598 if (hdev->commands[8] & 0x01)
599 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
600
601 /* Some older Broadcom based Bluetooth 1.2 controllers do not
602 * support the Read Page Scan Type command. Check support for
603 * this command in the bit mask of supported commands.
604 */
605 if (hdev->commands[13] & 0x01)
606 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
607
Andre Guedes9193c6e2014-07-01 18:10:09 -0300608 if (lmp_le_capable(hdev)) {
609 u8 events[8];
610
611 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +0200612
613 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
614 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -0300615
616 /* If controller supports the Connection Parameters Request
617 * Link Layer Procedure, enable the corresponding event.
618 */
619 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
620 events[0] |= 0x20; /* LE Remote Connection
621 * Parameter Request
622 */
623
Marcel Holtmanna9f60682014-12-20 16:28:39 +0100624 /* If the controller supports the Data Length Extension
625 * feature, enable the corresponding event.
626 */
627 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
628 events[0] |= 0x40; /* LE Data Length Change */
629
Marcel Holtmann4b71bba2014-12-05 16:20:12 +0100630 /* If the controller supports Extended Scanner Filter
631 * Policies, enable the correspondig event.
632 */
633 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
634 events[1] |= 0x04; /* LE Direct Advertising
635 * Report
636 */
637
Marcel Holtmann9756d332017-05-01 23:54:17 -0700638 /* If the controller supports Channel Selection Algorithm #2
639 * feature, enable the corresponding event.
640 */
641 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
642 events[2] |= 0x08; /* LE Channel Selection
643 * Algorithm
644 */
645
Marcel Holtmann7d26f5c2015-11-01 09:39:51 +0100646 /* If the controller supports the LE Set Scan Enable command,
647 * enable the corresponding advertising report event.
648 */
649 if (hdev->commands[26] & 0x08)
650 events[0] |= 0x02; /* LE Advertising Report */
651
652 /* If the controller supports the LE Create Connection
653 * command, enable the corresponding event.
654 */
655 if (hdev->commands[26] & 0x10)
656 events[0] |= 0x01; /* LE Connection Complete */
657
658 /* If the controller supports the LE Connection Update
659 * command, enable the corresponding event.
660 */
661 if (hdev->commands[27] & 0x04)
662 events[0] |= 0x04; /* LE Connection Update
663 * Complete
664 */
665
666 /* If the controller supports the LE Read Remote Used Features
667 * command, enable the corresponding event.
668 */
669 if (hdev->commands[27] & 0x20)
670 events[0] |= 0x08; /* LE Read Remote Used
671 * Features Complete
672 */
673
Marcel Holtmann5a34bd52014-12-05 16:20:15 +0100674 /* If the controller supports the LE Read Local P-256
675 * Public Key command, enable the corresponding event.
676 */
677 if (hdev->commands[34] & 0x02)
678 events[0] |= 0x80; /* LE Read Local P-256
679 * Public Key Complete
680 */
681
682 /* If the controller supports the LE Generate DHKey
683 * command, enable the corresponding event.
684 */
685 if (hdev->commands[34] & 0x04)
686 events[1] |= 0x01; /* LE Generate DHKey Complete */
687
Andre Guedes9193c6e2014-07-01 18:10:09 -0300688 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
689 events);
690
Marcel Holtmann15a49cc2014-07-12 23:20:50 +0200691 if (hdev->commands[25] & 0x40) {
692 /* Read LE Advertising Channel TX Power */
693 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
694 }
695
Marcel Holtmann2ab216a2015-11-01 09:39:48 +0100696 if (hdev->commands[26] & 0x40) {
697 /* Read LE White List Size */
698 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
699 0, NULL);
700 }
701
702 if (hdev->commands[26] & 0x80) {
703 /* Clear LE White List */
704 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
705 }
706
Marcel Holtmanna9f60682014-12-20 16:28:39 +0100707 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
708 /* Read LE Maximum Data Length */
709 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
710
711 /* Read LE Suggested Default Data Length */
712 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
713 }
714
Johan Hedberg42c6b122013-03-05 20:37:49 +0200715 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -0300716 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300717
718 /* Read features beyond page 1 if available */
719 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
720 struct hci_cp_read_local_ext_features cp;
721
722 cp.page = p;
723 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
724 sizeof(cp), &cp);
725 }
Johan Hedberga1d01db2015-11-11 08:11:25 +0200726
727 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200728}
729
/* Stage 4 of controller initialization: conditional commands that depend
 * on the capabilities discovered in the earlier stages (supported-commands
 * bitmask, LE features).  Runs as a synchronous HCI request; always
 * returns 0 — failures are reported through the request machinery.
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Delete all stored link keys for all addresses */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = hdev->le_max_tx_len;
		cp.tx_time = hdev->le_max_tx_time;
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	return 0;
}
793
/* Run the full controller initialization: up to four synchronous request
 * stages (hci_init1_req .. hci_init4_req), followed by debugfs setup.
 * AMP controllers stop after stage 2.  Returns 0 on success or the first
 * negative error from a failed stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Basic debugfs entries are created only during initial setup */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
850
/* Minimal init request for unconfigured controllers: reset (unless the
 * RESET_ON_CLOSE quirk makes it unnecessary) and read just enough state
 * (local version, and the BD address when the driver can program one)
 * to drive the setup/config phase.  Always returns 0.
 */
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
870
/* Run the minimal init sequence (hci_init0_req) for a controller that is
 * still unconfigured.  Raw devices are skipped entirely.  Returns 0 on
 * success or the negative error from the synchronous request.
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Basic debugfs entries are created only during initial setup */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
887
Johan Hedberga1d01db2015-11-11 08:11:25 +0200888static int hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889{
890 __u8 scan = opt;
891
Johan Hedberg42c6b122013-03-05 20:37:49 +0200892 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893
894 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200895 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200896 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700897}
898
Johan Hedberga1d01db2015-11-11 08:11:25 +0200899static int hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900{
901 __u8 auth = opt;
902
Johan Hedberg42c6b122013-03-05 20:37:49 +0200903 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904
905 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200906 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200907 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908}
909
Johan Hedberga1d01db2015-11-11 08:11:25 +0200910static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911{
912 __u8 encrypt = opt;
913
Johan Hedberg42c6b122013-03-05 20:37:49 +0200914 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200916 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200917 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200918 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919}
920
Johan Hedberga1d01db2015-11-11 08:11:25 +0200921static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200922{
923 __le16 policy = cpu_to_le16(opt);
924
Johan Hedberg42c6b122013-03-05 20:37:49 +0200925 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200926
927 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200928 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200929 return 0;
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200930}
931
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900932/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933 * Device is held on return. */
934struct hci_dev *hci_dev_get(int index)
935{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200936 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937
938 BT_DBG("%d", index);
939
940 if (index < 0)
941 return NULL;
942
943 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200944 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945 if (d->id == index) {
946 hdev = hci_dev_hold(d);
947 break;
948 }
949 }
950 read_unlock(&hci_dev_list_lock);
951 return hdev;
952}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700953
954/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200955
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200956bool hci_discovery_active(struct hci_dev *hdev)
957{
958 struct discovery_state *discov = &hdev->discovery;
959
Andre Guedes6fbe1952012-02-03 17:47:58 -0300960 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300961 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300962 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200963 return true;
964
Andre Guedes6fbe1952012-02-03 17:47:58 -0300965 default:
966 return false;
967 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200968}
969
Johan Hedbergff9ef572012-01-04 14:23:45 +0200970void hci_discovery_set_state(struct hci_dev *hdev, int state)
971{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300972 int old_state = hdev->discovery.state;
973
Johan Hedbergff9ef572012-01-04 14:23:45 +0200974 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
975
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300976 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +0200977 return;
978
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300979 hdev->discovery.state = state;
980
Johan Hedbergff9ef572012-01-04 14:23:45 +0200981 switch (state) {
982 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -0300983 hci_update_background_scan(hdev);
984
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300985 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -0300986 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200987 break;
988 case DISCOVERY_STARTING:
989 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300990 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200991 mgmt_discovering(hdev, 1);
992 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200993 case DISCOVERY_RESOLVING:
994 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200995 case DISCOVERY_STOPPING:
996 break;
997 }
Johan Hedbergff9ef572012-01-04 14:23:45 +0200998}
999
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001000void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001{
Johan Hedberg30883512012-01-04 14:16:21 +02001002 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001003 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004
Johan Hedberg561aafb2012-01-04 13:31:59 +02001005 list_for_each_entry_safe(p, n, &cache->all, all) {
1006 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001007 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001009
1010 INIT_LIST_HEAD(&cache->unknown);
1011 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012}
1013
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001014struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1015 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016{
Johan Hedberg30883512012-01-04 14:16:21 +02001017 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018 struct inquiry_entry *e;
1019
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001020 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001021
Johan Hedberg561aafb2012-01-04 13:31:59 +02001022 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001024 return e;
1025 }
1026
1027 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028}
1029
Johan Hedberg561aafb2012-01-04 13:31:59 +02001030struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001031 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001032{
Johan Hedberg30883512012-01-04 14:16:21 +02001033 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001034 struct inquiry_entry *e;
1035
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001036 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001037
1038 list_for_each_entry(e, &cache->unknown, list) {
1039 if (!bacmp(&e->data.bdaddr, bdaddr))
1040 return e;
1041 }
1042
1043 return NULL;
1044}
1045
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001046struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001047 bdaddr_t *bdaddr,
1048 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001049{
1050 struct discovery_state *cache = &hdev->discovery;
1051 struct inquiry_entry *e;
1052
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001053 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001054
1055 list_for_each_entry(e, &cache->resolve, list) {
1056 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1057 return e;
1058 if (!bacmp(&e->data.bdaddr, bdaddr))
1059 return e;
1060 }
1061
1062 return NULL;
1063}
1064
/* Re-insert @ie into the resolve list at its sorted position.  Entries
 * with a pending name request are skipped (they keep their place at the
 * front); among the rest the list is ordered by increasing abs(rssi),
 * so @ie lands just before the first non-pending entry with an equal or
 * larger abs(rssi).
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Detach first; the entry is re-added at its new position below */
	list_del(&ie->list);

	/* Track the last entry that must stay ahead of @ie */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1083
/* Insert or refresh the cache entry for an inquiry result.
 *
 * Returns MGMT_DEV_FOUND_* flags describing the result:
 * LEGACY_PAIRING when neither the new data nor a cached entry indicates
 * SSP support, and CONFIRM_NAME when the device name is still unknown
 * (or the entry could not be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Stale OOB data for this address is no longer valid */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* A changed RSSI re-sorts the entry in the resolve list
		 * while its name request is still outstanding.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to NAME_KNOWN once the name arrives,
	 * unless a name request is already pending for it.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1145
1146static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1147{
Johan Hedberg30883512012-01-04 14:16:21 +02001148 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149 struct inquiry_info *info = (struct inquiry_info *) buf;
1150 struct inquiry_entry *e;
1151 int copied = 0;
1152
Johan Hedberg561aafb2012-01-04 13:31:59 +02001153 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001155
1156 if (copied >= num)
1157 break;
1158
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 bacpy(&info->bdaddr, &data->bdaddr);
1160 info->pscan_rep_mode = data->pscan_rep_mode;
1161 info->pscan_period_mode = data->pscan_period_mode;
1162 info->pscan_mode = data->pscan_mode;
1163 memcpy(info->dev_class, data->dev_class, 3);
1164 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001165
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001167 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 }
1169
1170 BT_DBG("cache %p, copied %d", cache, copied);
1171 return copied;
1172}
1173
/* HCI request callback: queue an Inquiry command using the parameters in
 * @opt (a pointer to the struct hci_inquiry_req from hci_inquiry()).
 * Does nothing when an inquiry is already in progress.  Always returns 0.
 */
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
1193
1194int hci_inquiry(void __user *arg)
1195{
1196 __u8 __user *ptr = arg;
1197 struct hci_inquiry_req ir;
1198 struct hci_dev *hdev;
1199 int err = 0, do_inquiry = 0, max_rsp;
1200 long timeo;
1201 __u8 *buf;
1202
1203 if (copy_from_user(&ir, ptr, sizeof(ir)))
1204 return -EFAULT;
1205
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001206 hdev = hci_dev_get(ir.dev_id);
1207 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 return -ENODEV;
1209
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001210 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001211 err = -EBUSY;
1212 goto done;
1213 }
1214
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001215 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001216 err = -EOPNOTSUPP;
1217 goto done;
1218 }
1219
Marcel Holtmannca8bee52016-07-05 14:30:14 +02001220 if (hdev->dev_type != HCI_PRIMARY) {
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001221 err = -EOPNOTSUPP;
1222 goto done;
1223 }
1224
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001225 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001226 err = -EOPNOTSUPP;
1227 goto done;
1228 }
1229
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001230 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001231 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001232 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001233 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 do_inquiry = 1;
1235 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001236 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237
Marcel Holtmann04837f62006-07-03 10:02:33 +02001238 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001239
1240 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001241 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001242 timeo, NULL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001243 if (err < 0)
1244 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001245
1246 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1247 * cleared). If it is interrupted by a signal, return -EINTR.
1248 */
NeilBrown74316202014-07-07 15:16:04 +10001249 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001250 TASK_INTERRUPTIBLE))
1251 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001252 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001254 /* for unlimited number of responses we will use buffer with
1255 * 255 entries
1256 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1258
1259 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1260 * copy it to the user space.
1261 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001262 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001263 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 err = -ENOMEM;
1265 goto done;
1266 }
1267
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001268 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001270 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271
1272 BT_DBG("num_rsp %d", ir.num_rsp);
1273
1274 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1275 ptr += sizeof(ir);
1276 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001277 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001279 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 err = -EFAULT;
1281
1282 kfree(buf);
1283
1284done:
1285 hci_dev_put(hdev);
1286 return err;
1287}
1288
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001289static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 int ret = 0;
1292
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 BT_DBG("%s %p", hdev->name, hdev);
1294
Johan Hedbergb5044302015-11-10 09:44:55 +02001295 hci_req_sync_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001297 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001298 ret = -ENODEV;
1299 goto done;
1300 }
1301
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001302 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1303 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001304 /* Check for rfkill but allow the HCI setup stage to
1305 * proceed (which in itself doesn't cause any RF activity).
1306 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001307 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001308 ret = -ERFKILL;
1309 goto done;
1310 }
1311
1312 /* Check for valid public address or a configured static
1313 * random adddress, but let the HCI setup proceed to
1314 * be able to determine if there is a public address
1315 * or not.
1316 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001317 * In case of user channel usage, it is not important
1318 * if a public address or static random address is
1319 * available.
1320 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001321 * This check is only valid for BR/EDR controllers
1322 * since AMP controllers do not have an address.
1323 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001324 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannca8bee52016-07-05 14:30:14 +02001325 hdev->dev_type == HCI_PRIMARY &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001326 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1327 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1328 ret = -EADDRNOTAVAIL;
1329 goto done;
1330 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001331 }
1332
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 if (test_bit(HCI_UP, &hdev->flags)) {
1334 ret = -EALREADY;
1335 goto done;
1336 }
1337
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 if (hdev->open(hdev)) {
1339 ret = -EIO;
1340 goto done;
1341 }
1342
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001343 set_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001344 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001345
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001346 atomic_set(&hdev->cmd_cnt, 1);
1347 set_bit(HCI_INIT, &hdev->flags);
1348
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001349 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmanne131d742015-10-20 02:30:47 +02001350 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1351
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001352 if (hdev->setup)
1353 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001354
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001355 /* The transport driver can set these quirks before
1356 * creating the HCI device or in its setup callback.
1357 *
1358 * In case any of them is set, the controller has to
1359 * start up as unconfigured.
1360 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001361 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1362 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001363 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001364
1365 /* For an unconfigured controller it is required to
1366 * read at least the version information provided by
1367 * the Read Local Version Information command.
1368 *
1369 * If the set_bdaddr driver callback is provided, then
1370 * also the original Bluetooth public device address
1371 * will be read using the Read BD Address command.
1372 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001373 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001374 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001375 }
1376
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001377 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001378 /* If public address change is configured, ensure that
1379 * the address gets programmed. If the driver does not
1380 * support changing the public address, fail the power
1381 * on procedure.
1382 */
1383 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1384 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001385 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1386 else
1387 ret = -EADDRNOTAVAIL;
1388 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001389
1390 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001391 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001392 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001393 ret = __hci_init(hdev);
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001394 if (!ret && hdev->post_init)
1395 ret = hdev->post_init(hdev);
1396 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 }
1398
Marcel Holtmann7e995b92015-10-17 16:00:26 +02001399 /* If the HCI Reset command is clearing all diagnostic settings,
1400 * then they need to be reprogrammed after the init procedure
1401 * completed.
1402 */
1403 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1404 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1405 ret = hdev->set_diag(hdev, true);
1406
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001407 clear_bit(HCI_INIT, &hdev->flags);
1408
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 if (!ret) {
1410 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001411 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 set_bit(HCI_UP, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001413 hci_sock_dev_event(hdev, HCI_DEV_UP);
Heiner Kallweit6d5d2ee2016-01-08 19:28:58 +01001414 hci_leds_update_powered(hdev, true);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001415 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1416 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1417 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1418 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Johan Hedberg2ff13892015-11-25 16:15:44 +02001419 hci_dev_test_flag(hdev, HCI_MGMT) &&
Marcel Holtmannca8bee52016-07-05 14:30:14 +02001420 hdev->dev_type == HCI_PRIMARY) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02001421 ret = __hci_req_hci_power_on(hdev);
1422 mgmt_power_on(hdev, ret);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001423 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001424 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001426 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001427 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001428 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
1430 skb_queue_purge(&hdev->cmd_q);
1431 skb_queue_purge(&hdev->rx_q);
1432
1433 if (hdev->flush)
1434 hdev->flush(hdev);
1435
1436 if (hdev->sent_cmd) {
1437 kfree_skb(hdev->sent_cmd);
1438 hdev->sent_cmd = NULL;
1439 }
1440
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001441 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001442 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001443
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001445 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 }
1447
1448done:
Johan Hedbergb5044302015-11-10 09:44:55 +02001449 hci_req_sync_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 return ret;
1451}
1452
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001453/* ---- HCI ioctl helpers ---- */
1454
/* Legacy power-on entry point (HCIDEVUP ioctl and the power-on work).
 *
 * Looks up the controller by index, performs the preparation steps that
 * only apply to this calling context and then hands off to
 * hci_dev_do_open() for the actual initialization.
 *
 * Returns 0 on success, -ENODEV if the index does not exist,
 * -EOPNOTSUPP for an unconfigured controller that is not being opened
 * as user channel, or the error from hci_dev_do_open().
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1509
Johan Hedbergd7347f32014-07-04 12:37:23 +03001510/* This function requires the caller holds hdev->lock */
1511static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1512{
1513 struct hci_conn_params *p;
1514
Johan Hedbergf161dd42014-08-15 21:06:54 +03001515 list_for_each_entry(p, &hdev->le_conn_params, list) {
1516 if (p->conn) {
1517 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001518 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001519 p->conn = NULL;
1520 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001521 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001522 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001523
1524 BT_DBG("All LE pending actions cleared");
1525}
1526
/* Core power-off path shared by the HCIDEVDOWN ioctl, mgmt power-off,
 * rfkill and unregister. Tears the controller down in a strict order:
 * vendor shutdown, work cancellation, queue/state flushing, optional
 * HCI Reset, and finally the driver close callback.
 *
 * Always returns 0; calling it on an already-down controller is a
 * cheap no-op.
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	/* Run the vendor shutdown hook only while the controller is
	 * still registered, up, and not owned by a user channel.
	 */
	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	/* If the device was not up, only the command timer needs to be
	 * stopped; everything below assumes a running controller.
	 */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Cancel a pending discoverable timeout and drop both
	 * discoverable flags so the state is consistent on next open.
	 */
	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	/* Only notify mgmt of a real power-off on a primary controller
	 * that it manages, not on the auto-off or user-channel paths.
	 */
	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* Send HCI Reset synchronously before closing, for
		 * controllers that require it (quirk set by driver).
		 */
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	/* Forget state that must be re-established on the next open */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1646
1647int hci_dev_close(__u16 dev)
1648{
1649 struct hci_dev *hdev;
1650 int err;
1651
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001652 hdev = hci_dev_get(dev);
1653 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001655
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001656 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001657 err = -EBUSY;
1658 goto done;
1659 }
1660
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001661 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001662 cancel_delayed_work(&hdev->power_off);
1663
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001665
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001666done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 hci_dev_put(hdev);
1668 return err;
1669}
1670
/* Perform an HCI Reset on a running controller: drop pending traffic,
 * flush cached state and issue the synchronous HCI Reset command.
 *
 * Returns 0 on success or the error from __hci_req_sync().
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again and clear all completed
	 * packet counters before the controller state is reset.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1704
Marcel Holtmann5c912492015-01-28 11:53:05 -08001705int hci_dev_reset(__u16 dev)
1706{
1707 struct hci_dev *hdev;
1708 int err;
1709
1710 hdev = hci_dev_get(dev);
1711 if (!hdev)
1712 return -ENODEV;
1713
1714 if (!test_bit(HCI_UP, &hdev->flags)) {
1715 err = -ENETDOWN;
1716 goto done;
1717 }
1718
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001719 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001720 err = -EBUSY;
1721 goto done;
1722 }
1723
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001724 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001725 err = -EOPNOTSUPP;
1726 goto done;
1727 }
1728
1729 err = hci_dev_do_reset(hdev);
1730
1731done:
1732 hci_dev_put(hdev);
1733 return err;
1734}
1735
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736int hci_dev_reset_stat(__u16 dev)
1737{
1738 struct hci_dev *hdev;
1739 int ret = 0;
1740
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001741 hdev = hci_dev_get(dev);
1742 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 return -ENODEV;
1744
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001745 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001746 ret = -EBUSY;
1747 goto done;
1748 }
1749
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001750 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001751 ret = -EOPNOTSUPP;
1752 goto done;
1753 }
1754
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1756
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001757done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 return ret;
1760}
1761
Johan Hedberg123abc02014-07-10 12:09:07 +03001762static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1763{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001764 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001765
1766 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1767
1768 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001769 conn_changed = !hci_dev_test_and_set_flag(hdev,
1770 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001771 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001772 conn_changed = hci_dev_test_and_clear_flag(hdev,
1773 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001774
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001775 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001776 discov_changed = !hci_dev_test_and_set_flag(hdev,
1777 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001778 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001779 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001780 discov_changed = hci_dev_test_and_clear_flag(hdev,
1781 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001782 }
1783
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001784 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001785 return;
1786
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001787 if (conn_changed || discov_changed) {
1788 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001789 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001790
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001791 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02001792 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001793
Johan Hedberg123abc02014-07-10 12:09:07 +03001794 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001795 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001796}
1797
/* Legacy HCI configuration ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETLINKPOL, HCISETLINKMODE, HCISETPTYPE,
 * HCISETACLMTU, HCISETSCOMTU).
 *
 * Copies a struct hci_dev_req from userspace, validates the target
 * controller and dispatches on the ioctl number. Commands that touch
 * the controller go through hci_req_sync(); the rest only update
 * fields in hdev.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A user channel owns the device exclusively */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* These ioctls only make sense on a primary BR/EDR-capable
	 * controller with BR/EDR enabled.
	 */
	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit words: the second word in
		 * memory order is the MTU, the first the packet count.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same dev_opt packing as HCISETACLMTU */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1899
1900int hci_get_dev_list(void __user *arg)
1901{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001902 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 struct hci_dev_list_req *dl;
1904 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 int n = 0, size, err;
1906 __u16 dev_num;
1907
1908 if (get_user(dev_num, (__u16 __user *) arg))
1909 return -EFAULT;
1910
1911 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1912 return -EINVAL;
1913
1914 size = sizeof(*dl) + dev_num * sizeof(*dr);
1915
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001916 dl = kzalloc(size, GFP_KERNEL);
1917 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 return -ENOMEM;
1919
1920 dr = dl->dev_req;
1921
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001922 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001923 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001924 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001925
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001926 /* When the auto-off is configured it means the transport
1927 * is running, but in that case still indicate that the
1928 * device is actually down.
1929 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001930 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001931 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02001932
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001934 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001935
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936 if (++n >= dev_num)
1937 break;
1938 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001939 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940
1941 dl->dev_num = n;
1942 size = sizeof(*dl) + n * sizeof(*dr);
1943
1944 err = copy_to_user(arg, dl, size);
1945 kfree(dl);
1946
1947 return err ? -EFAULT : 0;
1948}
1949
1950int hci_get_dev_info(void __user *arg)
1951{
1952 struct hci_dev *hdev;
1953 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001954 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 int err = 0;
1956
1957 if (copy_from_user(&di, arg, sizeof(di)))
1958 return -EFAULT;
1959
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001960 hdev = hci_dev_get(di.dev_id);
1961 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 return -ENODEV;
1963
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001964 /* When the auto-off is configured it means the transport
1965 * is running, but in that case still indicate that the
1966 * device is actually down.
1967 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001968 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001969 flags = hdev->flags & ~BIT(HCI_UP);
1970 else
1971 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001972
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 strcpy(di.name, hdev->name);
1974 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001975 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001976 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001978 if (lmp_bredr_capable(hdev)) {
1979 di.acl_mtu = hdev->acl_mtu;
1980 di.acl_pkts = hdev->acl_pkts;
1981 di.sco_mtu = hdev->sco_mtu;
1982 di.sco_pkts = hdev->sco_pkts;
1983 } else {
1984 di.acl_mtu = hdev->le_mtu;
1985 di.acl_pkts = hdev->le_pkts;
1986 di.sco_mtu = 0;
1987 di.sco_pkts = 0;
1988 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 di.link_policy = hdev->link_policy;
1990 di.link_mode = hdev->link_mode;
1991
1992 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1993 memcpy(&di.features, &hdev->features, sizeof(di.features));
1994
1995 if (copy_to_user(arg, &di, sizeof(di)))
1996 err = -EFAULT;
1997
1998 hci_dev_put(hdev);
1999
2000 return err;
2001}
2002
2003/* ---- Interface to HCI drivers ---- */
2004
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002005static int hci_rfkill_set_block(void *data, bool blocked)
2006{
2007 struct hci_dev *hdev = data;
2008
2009 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2010
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002011 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002012 return -EBUSY;
2013
Johan Hedberg5e130362013-09-13 08:58:17 +03002014 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002015 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002016 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2017 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002018 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002019 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002020 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002021 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002022
2023 return 0;
2024}
2025
/* Operations registered with the rfkill subsystem for each controller;
 * only blocking/unblocking is supported.
 */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2029
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002030static void hci_power_on(struct work_struct *work)
2031{
2032 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002033 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002034
2035 BT_DBG("%s", hdev->name);
2036
Johan Hedberg2ff13892015-11-25 16:15:44 +02002037 if (test_bit(HCI_UP, &hdev->flags) &&
2038 hci_dev_test_flag(hdev, HCI_MGMT) &&
2039 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
Wei-Ning Huangd82142a2016-02-15 17:09:51 +08002040 cancel_delayed_work(&hdev->power_off);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002041 hci_req_sync_lock(hdev);
2042 err = __hci_req_hci_power_on(hdev);
2043 hci_req_sync_unlock(hdev);
2044 mgmt_power_on(hdev, err);
2045 return;
2046 }
2047
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002048 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002049 if (err < 0) {
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302050 hci_dev_lock(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002051 mgmt_set_powered_failed(hdev, err);
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302052 hci_dev_unlock(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002053 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002054 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002055
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002056 /* During the HCI setup phase, a few error conditions are
2057 * ignored and they need to be checked now. If they are still
2058 * valid, it is important to turn the device back off.
2059 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002060 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2061 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
Marcel Holtmannca8bee52016-07-05 14:30:14 +02002062 (hdev->dev_type == HCI_PRIMARY &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002063 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2064 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002065 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
Johan Hedbergbf543032013-09-13 08:58:18 +03002066 hci_dev_do_close(hdev);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002067 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002068 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2069 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002070 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002071
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002072 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002073 /* For unconfigured devices, set the HCI_RAW flag
2074 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002075 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002076 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann4a964402014-07-02 19:10:33 +02002077 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002078
2079 /* For fully configured devices, this will send
2080 * the Index Added event. For unconfigured devices,
2081 * it will send Unconfigued Index Added event.
2082 *
2083 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2084 * and no event will be send.
2085 */
Johan Hedberg744cf192011-11-08 20:40:14 +02002086 mgmt_index_added(hdev);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002087 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002088 /* When the controller is now configured, then it
2089 * is important to clear the HCI_RAW flag.
2090 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002091 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002092 clear_bit(HCI_RAW, &hdev->flags);
2093
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002094 /* Powering on the controller with HCI_CONFIG set only
2095 * happens with the transition from unconfigured to
2096 * configured. This will send the Index Added event.
2097 */
2098 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002099 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002100}
2101
2102static void hci_power_off(struct work_struct *work)
2103{
Johan Hedberg32435532011-11-07 22:16:04 +02002104 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002105 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002106
2107 BT_DBG("%s", hdev->name);
2108
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002109 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002110}
2111
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002112static void hci_error_reset(struct work_struct *work)
2113{
2114 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2115
2116 BT_DBG("%s", hdev->name);
2117
2118 if (hdev->hw_error)
2119 hdev->hw_error(hdev, hdev->hw_error_code);
2120 else
2121 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2122 hdev->hw_error_code);
2123
2124 if (hci_dev_do_close(hdev))
2125 return;
2126
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002127 hci_dev_do_open(hdev);
2128}
2129
Johan Hedberg35f74982014-02-18 17:14:32 +02002130void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002131{
Johan Hedberg48210022013-01-27 00:31:28 +02002132 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002133
Johan Hedberg48210022013-01-27 00:31:28 +02002134 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2135 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002136 kfree(uuid);
2137 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002138}
2139
Johan Hedberg35f74982014-02-18 17:14:32 +02002140void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002141{
Johan Hedberg0378b592014-11-19 15:22:22 +02002142 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002143
Johan Hedberg0378b592014-11-19 15:22:22 +02002144 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2145 list_del_rcu(&key->list);
2146 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002147 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002148}
2149
Johan Hedberg35f74982014-02-18 17:14:32 +02002150void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002151{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002152 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002153
Johan Hedberg970d0f12014-11-13 14:37:47 +02002154 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2155 list_del_rcu(&k->list);
2156 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002157 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002158}
2159
Johan Hedberg970c4e42014-02-18 10:19:33 +02002160void hci_smp_irks_clear(struct hci_dev *hdev)
2161{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002162 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002163
Johan Hedbergadae20c2014-11-13 14:37:48 +02002164 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2165 list_del_rcu(&k->list);
2166 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002167 }
2168}
2169
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002170struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2171{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002172 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002173
Johan Hedberg0378b592014-11-19 15:22:22 +02002174 rcu_read_lock();
2175 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2176 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2177 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002178 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002179 }
2180 }
2181 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002182
2183 return NULL;
2184}
2185
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302186static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002187 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002188{
2189 /* Legacy key */
2190 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302191 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002192
2193 /* Debug keys are insecure so don't store them persistently */
2194 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302195 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002196
2197 /* Changed combination key and there's no previous one */
2198 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302199 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002200
2201 /* Security mode 3 case */
2202 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302203 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002204
Johan Hedberge3befab2014-06-01 16:33:39 +03002205 /* BR/EDR key derived using SC from an LE link */
2206 if (conn->type == LE_LINK)
2207 return true;
2208
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002209 /* Neither local nor remote side had no-bonding as requirement */
2210 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302211 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002212
2213 /* Local side had dedicated bonding as requirement */
2214 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302215 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002216
2217 /* Remote side had dedicated bonding as requirement */
2218 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302219 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002220
2221 /* If none of the above criteria match, then don't store the key
2222 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302223 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002224}
2225
Johan Hedberge804d252014-07-16 11:42:28 +03002226static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002227{
Johan Hedberge804d252014-07-16 11:42:28 +03002228 if (type == SMP_LTK)
2229 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002230
Johan Hedberge804d252014-07-16 11:42:28 +03002231 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002232}
2233
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002234struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2235 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002236{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002237 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002238
Johan Hedberg970d0f12014-11-13 14:37:47 +02002239 rcu_read_lock();
2240 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002241 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2242 continue;
2243
Johan Hedberg923e2412014-12-03 12:43:39 +02002244 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002245 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002246 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002247 }
2248 }
2249 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002250
2251 return NULL;
2252}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002253
Johan Hedberg970c4e42014-02-18 10:19:33 +02002254struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2255{
2256 struct smp_irk *irk;
2257
Johan Hedbergadae20c2014-11-13 14:37:48 +02002258 rcu_read_lock();
2259 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2260 if (!bacmp(&irk->rpa, rpa)) {
2261 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002262 return irk;
2263 }
2264 }
2265
Johan Hedbergadae20c2014-11-13 14:37:48 +02002266 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2267 if (smp_irk_matches(hdev, irk->val, rpa)) {
2268 bacpy(&irk->rpa, rpa);
2269 rcu_read_unlock();
2270 return irk;
2271 }
2272 }
2273 rcu_read_unlock();
2274
Johan Hedberg970c4e42014-02-18 10:19:33 +02002275 return NULL;
2276}
2277
/* Find the IRK stored for a given identity address and address type.
 *
 * Returns NULL immediately for random addresses that are not static
 * (top two bits must be 0b11), since only public or static random
 * addresses can be identity addresses.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2299
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002300struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002301 bdaddr_t *bdaddr, u8 *val, u8 type,
2302 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002303{
2304 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302305 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002306
2307 old_key = hci_find_link_key(hdev, bdaddr);
2308 if (old_key) {
2309 old_key_type = old_key->type;
2310 key = old_key;
2311 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002312 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002313 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002314 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002315 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02002316 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002317 }
2318
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002319 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002320
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002321 /* Some buggy controller combinations generate a changed
2322 * combination key for legacy pairing even when there's no
2323 * previous key */
2324 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002325 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002326 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002327 if (conn)
2328 conn->key_type = type;
2329 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002330
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002331 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002332 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002333 key->pin_len = pin_len;
2334
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002335 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002336 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002337 else
2338 key->type = type;
2339
Johan Hedberg7652ff62014-06-24 13:15:49 +03002340 if (persistent)
2341 *persistent = hci_persistent_key(hdev, conn, type,
2342 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002343
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002344 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002345}
2346
Johan Hedbergca9142b2014-02-19 14:57:44 +02002347struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002348 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002349 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002350{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002351 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002352 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002353
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002354 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002355 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002356 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002357 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002358 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002359 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002360 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002361 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002362 }
2363
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002364 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002365 key->bdaddr_type = addr_type;
2366 memcpy(key->val, tk, sizeof(key->val));
2367 key->authenticated = authenticated;
2368 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002369 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002370 key->enc_size = enc_size;
2371 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002372
Johan Hedbergca9142b2014-02-19 14:57:44 +02002373 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002374}
2375
Johan Hedbergca9142b2014-02-19 14:57:44 +02002376struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2377 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002378{
2379 struct smp_irk *irk;
2380
2381 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2382 if (!irk) {
2383 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2384 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002385 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002386
2387 bacpy(&irk->bdaddr, bdaddr);
2388 irk->addr_type = addr_type;
2389
Johan Hedbergadae20c2014-11-13 14:37:48 +02002390 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002391 }
2392
2393 memcpy(irk->val, val, 16);
2394 bacpy(&irk->rpa, rpa);
2395
Johan Hedbergca9142b2014-02-19 14:57:44 +02002396 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002397}
2398
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002399int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2400{
2401 struct link_key *key;
2402
2403 key = hci_find_link_key(hdev, bdaddr);
2404 if (!key)
2405 return -ENOENT;
2406
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002407 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002408
Johan Hedberg0378b592014-11-19 15:22:22 +02002409 list_del_rcu(&key->list);
2410 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002411
2412 return 0;
2413}
2414
Johan Hedberge0b2b272014-02-18 17:14:31 +02002415int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002416{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002417 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002418 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002419
Johan Hedberg970d0f12014-11-13 14:37:47 +02002420 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002421 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002422 continue;
2423
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002424 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002425
Johan Hedberg970d0f12014-11-13 14:37:47 +02002426 list_del_rcu(&k->list);
2427 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002428 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002429 }
2430
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002431 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002432}
2433
Johan Hedberga7ec7332014-02-18 17:14:35 +02002434void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2435{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002436 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002437
Johan Hedbergadae20c2014-11-13 14:37:48 +02002438 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002439 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2440 continue;
2441
2442 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2443
Johan Hedbergadae20c2014-11-13 14:37:48 +02002444 list_del_rcu(&k->list);
2445 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002446 }
2447}
2448
Johan Hedberg55e76b32015-03-10 22:34:40 +02002449bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2450{
2451 struct smp_ltk *k;
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002452 struct smp_irk *irk;
Johan Hedberg55e76b32015-03-10 22:34:40 +02002453 u8 addr_type;
2454
2455 if (type == BDADDR_BREDR) {
2456 if (hci_find_link_key(hdev, bdaddr))
2457 return true;
2458 return false;
2459 }
2460
2461 /* Convert to HCI addr type which struct smp_ltk uses */
2462 if (type == BDADDR_LE_PUBLIC)
2463 addr_type = ADDR_LE_DEV_PUBLIC;
2464 else
2465 addr_type = ADDR_LE_DEV_RANDOM;
2466
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002467 irk = hci_get_irk(hdev, bdaddr, addr_type);
2468 if (irk) {
2469 bdaddr = &irk->bdaddr;
2470 addr_type = irk->addr_type;
2471 }
2472
Johan Hedberg55e76b32015-03-10 22:34:40 +02002473 rcu_read_lock();
2474 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg87c8b282015-03-11 08:55:51 +02002475 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2476 rcu_read_unlock();
Johan Hedberg55e76b32015-03-10 22:34:40 +02002477 return true;
Johan Hedberg87c8b282015-03-11 08:55:51 +02002478 }
Johan Hedberg55e76b32015-03-10 22:34:40 +02002479 }
2480 rcu_read_unlock();
2481
2482 return false;
2483}
2484
Ville Tervo6bd32322011-02-16 16:32:41 +02002485/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002486static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002487{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002488 struct hci_dev *hdev = container_of(work, struct hci_dev,
2489 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002490
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002491 if (hdev->sent_cmd) {
2492 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2493 u16 opcode = __le16_to_cpu(sent->opcode);
2494
2495 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2496 } else {
2497 BT_ERR("%s command tx timeout", hdev->name);
2498 }
2499
Ville Tervo6bd32322011-02-16 16:32:41 +02002500 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002501 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002502}
2503
Szymon Janc2763eda2011-03-22 13:12:22 +01002504struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002505 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002506{
2507 struct oob_data *data;
2508
Johan Hedberg6928a922014-10-26 20:46:09 +01002509 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2510 if (bacmp(bdaddr, &data->bdaddr) != 0)
2511 continue;
2512 if (data->bdaddr_type != bdaddr_type)
2513 continue;
2514 return data;
2515 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002516
2517 return NULL;
2518}
2519
Johan Hedberg6928a922014-10-26 20:46:09 +01002520int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2521 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002522{
2523 struct oob_data *data;
2524
Johan Hedberg6928a922014-10-26 20:46:09 +01002525 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002526 if (!data)
2527 return -ENOENT;
2528
Johan Hedberg6928a922014-10-26 20:46:09 +01002529 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002530
2531 list_del(&data->list);
2532 kfree(data);
2533
2534 return 0;
2535}
2536
Johan Hedberg35f74982014-02-18 17:14:32 +02002537void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002538{
2539 struct oob_data *data, *n;
2540
2541 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2542 list_del(&data->list);
2543 kfree(data);
2544 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002545}
2546
Marcel Holtmann07988722014-01-10 02:07:29 -08002547int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002548 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002549 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01002550{
2551 struct oob_data *data;
2552
Johan Hedberg6928a922014-10-26 20:46:09 +01002553 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002554 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002555 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002556 if (!data)
2557 return -ENOMEM;
2558
2559 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01002560 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01002561 list_add(&data->list, &hdev->remote_oob_data);
2562 }
2563
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002564 if (hash192 && rand192) {
2565 memcpy(data->hash192, hash192, sizeof(data->hash192));
2566 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002567 if (hash256 && rand256)
2568 data->present = 0x03;
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002569 } else {
2570 memset(data->hash192, 0, sizeof(data->hash192));
2571 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002572 if (hash256 && rand256)
2573 data->present = 0x02;
2574 else
2575 data->present = 0x00;
Marcel Holtmann07988722014-01-10 02:07:29 -08002576 }
2577
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002578 if (hash256 && rand256) {
2579 memcpy(data->hash256, hash256, sizeof(data->hash256));
2580 memcpy(data->rand256, rand256, sizeof(data->rand256));
2581 } else {
2582 memset(data->hash256, 0, sizeof(data->hash256));
2583 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002584 if (hash192 && rand192)
2585 data->present = 0x01;
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002586 }
Marcel Holtmann07988722014-01-10 02:07:29 -08002587
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002588 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002589
2590 return 0;
2591}
2592
Florian Grandeld2609b32015-06-18 03:16:34 +02002593/* This function requires the caller holds hdev->lock */
2594struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2595{
2596 struct adv_info *adv_instance;
2597
2598 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2599 if (adv_instance->instance == instance)
2600 return adv_instance;
2601 }
2602
2603 return NULL;
2604}
2605
2606/* This function requires the caller holds hdev->lock */
Prasanna Karthik74b93e92015-11-18 12:38:41 +00002607struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2608{
Florian Grandeld2609b32015-06-18 03:16:34 +02002609 struct adv_info *cur_instance;
2610
2611 cur_instance = hci_find_adv_instance(hdev, instance);
2612 if (!cur_instance)
2613 return NULL;
2614
2615 if (cur_instance == list_last_entry(&hdev->adv_instances,
2616 struct adv_info, list))
2617 return list_first_entry(&hdev->adv_instances,
2618 struct adv_info, list);
2619 else
2620 return list_next_entry(cur_instance, list);
2621}
2622
2623/* This function requires the caller holds hdev->lock */
2624int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2625{
2626 struct adv_info *adv_instance;
2627
2628 adv_instance = hci_find_adv_instance(hdev, instance);
2629 if (!adv_instance)
2630 return -ENOENT;
2631
2632 BT_DBG("%s removing %dMR", hdev->name, instance);
2633
Johan Hedbergcab054a2015-11-30 11:21:45 +02002634 if (hdev->cur_adv_instance == instance) {
2635 if (hdev->adv_instance_timeout) {
2636 cancel_delayed_work(&hdev->adv_instance_expire);
2637 hdev->adv_instance_timeout = 0;
2638 }
2639 hdev->cur_adv_instance = 0x00;
Florian Grandel5d900e42015-06-18 03:16:35 +02002640 }
2641
Florian Grandeld2609b32015-06-18 03:16:34 +02002642 list_del(&adv_instance->list);
2643 kfree(adv_instance);
2644
2645 hdev->adv_instance_cnt--;
2646
2647 return 0;
2648}
2649
/* This function requires the caller holds hdev->lock.
 * Frees every advertising instance, after cancelling a pending
 * instance-expiry timer, and resets the instance bookkeeping.
 */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}
2668
2669/* This function requires the caller holds hdev->lock */
2670int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2671 u16 adv_data_len, u8 *adv_data,
2672 u16 scan_rsp_len, u8 *scan_rsp_data,
2673 u16 timeout, u16 duration)
2674{
2675 struct adv_info *adv_instance;
2676
2677 adv_instance = hci_find_adv_instance(hdev, instance);
2678 if (adv_instance) {
2679 memset(adv_instance->adv_data, 0,
2680 sizeof(adv_instance->adv_data));
2681 memset(adv_instance->scan_rsp_data, 0,
2682 sizeof(adv_instance->scan_rsp_data));
2683 } else {
2684 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2685 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2686 return -EOVERFLOW;
2687
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002688 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002689 if (!adv_instance)
2690 return -ENOMEM;
2691
Florian Grandelfffd38b2015-06-18 03:16:47 +02002692 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002693 adv_instance->instance = instance;
2694 list_add(&adv_instance->list, &hdev->adv_instances);
2695 hdev->adv_instance_cnt++;
2696 }
2697
2698 adv_instance->flags = flags;
2699 adv_instance->adv_data_len = adv_data_len;
2700 adv_instance->scan_rsp_len = scan_rsp_len;
2701
2702 if (adv_data_len)
2703 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2704
2705 if (scan_rsp_len)
2706 memcpy(adv_instance->scan_rsp_data,
2707 scan_rsp_data, scan_rsp_len);
2708
2709 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002710 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002711
2712 if (duration == 0)
2713 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2714 else
2715 adv_instance->duration = duration;
2716
2717 BT_DBG("%s for %dMR", hdev->name, instance);
2718
2719 return 0;
2720}
2721
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002722struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002723 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002724{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002725 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002726
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002727 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002728 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002729 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002730 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002731
2732 return NULL;
2733}
2734
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002735void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002736{
Geliang Tang7eb74042015-12-18 23:33:25 +08002737 struct bdaddr_list *b, *n;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002738
Geliang Tang7eb74042015-12-18 23:33:25 +08002739 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2740 list_del(&b->list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002741 kfree(b);
2742 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002743}
2744
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002745int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002746{
2747 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002748
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002749 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002750 return -EBADF;
2751
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002752 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002753 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002754
Johan Hedberg27f70f32014-07-21 10:50:06 +03002755 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002756 if (!entry)
2757 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002758
2759 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002760 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002761
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002762 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002763
2764 return 0;
2765}
2766
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002767int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002768{
2769 struct bdaddr_list *entry;
2770
Johan Hedberg35f74982014-02-18 17:14:32 +02002771 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002772 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002773 return 0;
2774 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002775
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002776 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002777 if (!entry)
2778 return -ENOENT;
2779
2780 list_del(&entry->list);
2781 kfree(entry);
2782
2783 return 0;
2784}
2785
Andre Guedes15819a72014-02-03 13:56:18 -03002786/* This function requires the caller holds hdev->lock */
2787struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2788 bdaddr_t *addr, u8 addr_type)
2789{
2790 struct hci_conn_params *params;
2791
2792 list_for_each_entry(params, &hdev->le_conn_params, list) {
2793 if (bacmp(&params->addr, addr) == 0 &&
2794 params->addr_type == addr_type) {
2795 return params;
2796 }
2797 }
2798
2799 return NULL;
2800}
2801
2802/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002803struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2804 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002805{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002806 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002807
Johan Hedberg501f8822014-07-04 12:37:26 +03002808 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002809 if (bacmp(&param->addr, addr) == 0 &&
2810 param->addr_type == addr_type)
2811 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002812 }
2813
2814 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002815}
2816
2817/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002818struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2819 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002820{
2821 struct hci_conn_params *params;
2822
2823 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002824 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002825 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002826
2827 params = kzalloc(sizeof(*params), GFP_KERNEL);
2828 if (!params) {
2829 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002830 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002831 }
2832
2833 bacpy(&params->addr, addr);
2834 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002835
2836 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002837 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002838
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002839 params->conn_min_interval = hdev->le_conn_min_interval;
2840 params->conn_max_interval = hdev->le_conn_max_interval;
2841 params->conn_latency = hdev->le_conn_latency;
2842 params->supervision_timeout = hdev->le_supv_timeout;
2843 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2844
2845 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2846
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002847 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002848}
2849
/* Unlink @params from both list linkages and free it.
 *
 * If a connection is still attached, the hci_conn references taken for
 * it are dropped first. Callers in this file hold hdev->lock when
 * invoking this.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2861
Andre Guedes15819a72014-02-03 13:56:18 -03002862/* This function requires the caller holds hdev->lock */
2863void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2864{
2865 struct hci_conn_params *params;
2866
2867 params = hci_conn_params_lookup(hdev, addr, addr_type);
2868 if (!params)
2869 return;
2870
Johan Hedbergf6c63242014-08-15 21:06:59 +03002871 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002872
Johan Hedberg95305ba2014-07-04 12:37:21 +03002873 hci_update_background_scan(hdev);
2874
Andre Guedes15819a72014-02-03 13:56:18 -03002875 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2876}
2877
/* Drop all connection parameter entries whose auto-connect policy is
 * HCI_AUTO_CONN_DISABLED, keeping (but downgrading) entries that still
 * have an explicit connection attempt pending.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		/* NOTE(review): unlike hci_conn_params_free(), this does
		 * not unlink params->action; presumably disabled entries
		 * are never on a pending-action list — confirm.
		 */
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2901
2902/* This function requires the caller holds hdev->lock */
Johan Hedberg030e7f82015-11-10 09:44:53 +02002903static void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002904{
2905 struct hci_conn_params *params, *tmp;
2906
Johan Hedbergf6c63242014-08-15 21:06:59 +03002907 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2908 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002909
2910 BT_DBG("All LE connection parameters were removed");
2911}
2912
Johan Hedberga1f4c312014-02-27 14:05:41 +02002913/* Copy the Identity Address of the controller.
2914 *
2915 * If the controller has a public BD_ADDR, then by default use that one.
2916 * If this is a LE only controller without a public address, default to
2917 * the static random address.
2918 *
2919 * For debugging purposes it is possible to force controllers with a
2920 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002921 *
2922 * In case BR/EDR has been disabled on a dual-mode controller and
2923 * userspace has configured a static address, then that address
2924 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02002925 */
2926void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2927 u8 *bdaddr_type)
2928{
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002929 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002930 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002931 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002932 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02002933 bacpy(bdaddr, &hdev->static_addr);
2934 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2935 } else {
2936 bacpy(bdaddr, &hdev->bdaddr);
2937 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2938 }
2939}
2940
/* Alloc HCI device.
 *
 * Allocates and initializes a struct hci_dev with default parameter
 * values, empty lists, work items and queues. Returns NULL on
 * allocation failure. The caller is expected to release the device
 * via hci_free_dev() (which drops the embedded device reference).
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline packet types and link policy. */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning and connection defaults (controller
	 * units; see the Bluetooth Core spec for the encodings).
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* All device-scoped lists start out empty. */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	/* Deferred processing contexts for RX, command and TX paths. */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3028
3029/* Free HCI device */
3030void hci_free_dev(struct hci_dev *hdev)
3031{
David Herrmann9be0dab2012-04-22 14:39:57 +02003032 /* will free via device release */
3033 put_device(&hdev->dev);
3034}
3035EXPORT_SYMBOL(hci_free_dev);
3036
/* Register HCI device.
 *
 * Validates the mandatory driver callbacks, assigns an index, creates
 * the workqueues, sysfs/debugfs entries and rfkill hook, links the
 * device into the global list and schedules the initial power-on.
 * Returns the new index on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must supply open, close and send at minimum. */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	/* rfkill registration is best-effort; the device works without. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3142
3143/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003144void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145{
Marcel Holtmann2d7cc192015-04-04 21:59:27 -07003146 int id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003147
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003148 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003150 hci_dev_set_flag(hdev, HCI_UNREGISTER);
Johan Hovold94324962012-03-15 14:48:41 +01003151
Sasha Levin3df92b32012-05-27 22:36:56 +02003152 id = hdev->id;
3153
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003154 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003156 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003158 cancel_work_sync(&hdev->power_on);
3159
Jiri Slabybf389cab2016-05-13 10:38:49 +02003160 hci_dev_do_close(hdev);
3161
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003162 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003163 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3164 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003165 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003166 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003167 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003168 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003169
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003170 /* mgmt_index_removed should take care of emptying the
3171 * pending list */
3172 BUG_ON(!list_empty(&hdev->mgmt_pending));
3173
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003174 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003176 if (hdev->rfkill) {
3177 rfkill_unregister(hdev->rfkill);
3178 rfkill_destroy(hdev->rfkill);
3179 }
3180
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003181 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003182
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003183 debugfs_remove_recursive(hdev->debugfs);
Marcel Holtmann5177a832016-07-17 19:55:16 +02003184 kfree_const(hdev->hw_info);
3185 kfree_const(hdev->fw_info);
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003186
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003187 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003188 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003189
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003190 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003191 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003192 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003193 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003194 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003195 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003196 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003197 hci_remote_oob_data_clear(hdev);
Florian Grandeld2609b32015-06-18 03:16:34 +02003198 hci_adv_instances_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003199 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003200 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003201 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003202 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003203
David Herrmanndc946bd2012-01-07 15:47:24 +01003204 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003205
3206 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207}
3208EXPORT_SYMBOL(hci_unregister_dev);
3209
3210/* Suspend HCI device */
3211int hci_suspend_dev(struct hci_dev *hdev)
3212{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003213 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214 return 0;
3215}
3216EXPORT_SYMBOL(hci_suspend_dev);
3217
3218/* Resume HCI device */
3219int hci_resume_dev(struct hci_dev *hdev)
3220{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003221 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222 return 0;
3223}
3224EXPORT_SYMBOL(hci_resume_dev);
3225
Marcel Holtmann75e05692014-11-02 08:15:38 +01003226/* Reset HCI device */
3227int hci_reset_dev(struct hci_dev *hdev)
3228{
3229 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3230 struct sk_buff *skb;
3231
3232 skb = bt_skb_alloc(3, GFP_ATOMIC);
3233 if (!skb)
3234 return -ENOMEM;
3235
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003236 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
Marcel Holtmann75e05692014-11-02 08:15:38 +01003237 memcpy(skb_put(skb, 3), hw_err, 3);
3238
3239 /* Send Hardware Error to upper stack */
3240 return hci_recv_frame(hdev, skb);
3241}
3242EXPORT_SYMBOL(hci_reset_dev);
3243
Marcel Holtmann76bca882009-11-18 00:40:39 +01003244/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003245int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003246{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003247 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003248 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003249 kfree_skb(skb);
3250 return -ENXIO;
3251 }
3252
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003253 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3254 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3255 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
Marcel Holtmannfe806dc2015-10-08 03:14:28 +02003256 kfree_skb(skb);
3257 return -EINVAL;
3258 }
3259
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003260 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003261 bt_cb(skb)->incoming = 1;
3262
3263 /* Time stamp */
3264 __net_timestamp(skb);
3265
Marcel Holtmann76bca882009-11-18 00:40:39 +01003266 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003267 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003268
Marcel Holtmann76bca882009-11-18 00:40:39 +01003269 return 0;
3270}
3271EXPORT_SYMBOL(hci_recv_frame);
3272
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003273/* Receive diagnostic message from HCI drivers */
3274int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3275{
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003276 /* Mark as diagnostic packet */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003277 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003278
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003279 /* Time stamp */
3280 __net_timestamp(skb);
3281
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003282 skb_queue_tail(&hdev->rx_q, skb);
3283 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003284
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003285 return 0;
3286}
3287EXPORT_SYMBOL(hci_recv_diag);
3288
Marcel Holtmann5177a832016-07-17 19:55:16 +02003289void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3290{
3291 va_list vargs;
3292
3293 va_start(vargs, fmt);
3294 kfree_const(hdev->hw_info);
3295 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3296 va_end(vargs);
3297}
3298EXPORT_SYMBOL(hci_set_hw_info);
3299
3300void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3301{
3302 va_list vargs;
3303
3304 va_start(vargs, fmt);
3305 kfree_const(hdev->fw_info);
3306 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3307 va_end(vargs);
3308}
3309EXPORT_SYMBOL(hci_set_fw_info);
3310
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311/* ---- Interface to upper protocols ---- */
3312
/* Register an upper-protocol callback set.
 *
 * Appends @cb to the global hci_cb_list under hci_cb_list_lock.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3324
/* Unregister an upper-protocol callback set.
 *
 * Removes @cb from the global hci_cb_list under hci_cb_list_lock.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3336
/* Deliver one outgoing frame to the driver.
 *
 * Copies are fanned out to the monitor and, in promiscuous mode, to
 * raw sockets before the skb is handed to hdev->send(). The skb is
 * consumed in all cases (dropped if the device is not running or the
 * driver send fails).
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Silently drop frames once the device stopped running. */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
3369
/* Send HCI command.
 *
 * Builds a command skb for @opcode with @plen bytes of @param, flags
 * it as a stand-alone request and queues it on the command queue for
 * the command work item. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter bytes of the last sent command
 * if its opcode matches @opcode, otherwise NULL.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* hdr->opcode is little-endian on the wire; compare in LE. */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	/* Parameters start right after the command header. */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3412
Loic Poulainfbef1682015-09-29 15:05:44 +02003413/* Send HCI command and wait for command commplete event */
3414struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3415 const void *param, u32 timeout)
3416{
3417 struct sk_buff *skb;
3418
3419 if (!test_bit(HCI_UP, &hdev->flags))
3420 return ERR_PTR(-ENETDOWN);
3421
3422 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3423
Johan Hedbergb5044302015-11-10 09:44:55 +02003424 hci_req_sync_lock(hdev);
Loic Poulainfbef1682015-09-29 15:05:44 +02003425 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
Johan Hedbergb5044302015-11-10 09:44:55 +02003426 hci_req_sync_unlock(hdev);
Loic Poulainfbef1682015-09-29 15:05:44 +02003427
3428 return skb;
3429}
3430EXPORT_SYMBOL(hci_cmd_sync);
3431
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432/* Send ACL data */
3433static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3434{
3435 struct hci_acl_hdr *hdr;
3436 int len = skb->len;
3437
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003438 skb_push(skb, HCI_ACL_HDR_SIZE);
3439 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003440 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003441 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3442 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443}
3444
/* Set up ACL headers on @skb (and on any fragments hanging off its
 * frag_list) and move everything onto @queue for the TX worker.
 * The first fragment keeps the caller's flags; subsequent fragments
 * are re-flagged as ACL_CONT.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict accounting to the linear head; the frag_list members
	 * are queued as individual packets below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		/* AMP links address the logical channel, not the connection */
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must not carry the start flag */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3506
3507void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3508{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003509 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003510
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003511 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003512
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003513 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003515 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517
3518/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003519void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520{
3521 struct hci_dev *hdev = conn->hdev;
3522 struct hci_sco_hdr hdr;
3523
3524 BT_DBG("%s len %d", hdev->name, skb->len);
3525
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003526 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527 hdr.dlen = skb->len;
3528
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003529 skb_push(skb, HCI_SCO_HDR_SIZE);
3530 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003531 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003533 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003534
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003536 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538
3539/* ---- HCI TX task (outgoing data) ---- */
3540
/* HCI Connection scheduler.
 *
 * Pick the connection of @type that has queued data and the fewest
 * outstanding (unacked) packets, giving round-robin-ish fairness.
 * On return *quote holds the number of packets that connection may
 * send, derived from the controller's free buffer count; it is set
 * to 0 when no connection is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only consider connections of the right type with data */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest packets in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type have been seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE may share the ACL buffer pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the free buffers evenly; always allow at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3601
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003602static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603{
3604 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003605 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606
Ville Tervobae1f5d92011-02-10 22:38:53 -03003607 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003609 rcu_read_lock();
3610
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003612 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003613 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003614 BT_ERR("%s killing stalled connection %pMR",
3615 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003616 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 }
3618 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003619
3620 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003621}
3622
/* Channel-level scheduler: among all channels of connections of @type,
 * select the one whose head-of-queue skb has the highest priority; ties
 * are broken in favour of the connection with the fewest outstanding
 * packets. *quote receives the per-channel TX budget computed from the
 * controller's free buffer count. Returns NULL when nothing is ready.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters here */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping for this new priority bucket.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Prefer the least-busy connection in the bucket */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that matches the winning link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE may share the ACL buffer pool */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split free buffers across the bucket; always allow at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3704
/* Anti-starvation pass run after a scheduling round: for every channel
 * of @type that sent nothing this round (chan->sent == 0) but has data
 * queued, promote its head skb to just below the maximum priority so it
 * wins a later round. Channels that did send get their counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: clear its counter
			 * and leave its priorities untouched.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3754
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003755static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3756{
3757 /* Calculate count of blocks used by this packet */
3758 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3759}
3760
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003761static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003763 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764 /* ACL tx timeout must be longer than maximum
3765 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003766 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003767 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003768 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003770}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771
/* Packet-based ACL scheduler: repeatedly pick the best channel and send
 * up to its quota of packets while controller buffers remain. If a
 * higher-priority packet shows up on another channel (detected via the
 * priority drop check), move on. Afterwards, promote starved channels.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek above confirmed the head; now take it */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Anything sent this round? Re-balance starved channels. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3809
/* Block-based ACL scheduler (data block flow control): like
 * hci_sched_acl_pkt() but accounting is done in controller data blocks
 * rather than packets. On AMP controllers the AMP_LINK type is used.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet needs more blocks than are free: give up
			 * for this round (skb stays dequeued here).
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Account in blocks, not packets */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Anything sent this round? Re-balance starved channels. */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3863
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003864static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003865{
3866 BT_DBG("%s", hdev->name);
3867
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003868 /* No ACL link over BR/EDR controller */
Marcel Holtmannca8bee52016-07-05 14:30:14 +02003869 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003870 return;
3871
3872 /* No AMP link over AMP controller */
3873 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003874 return;
3875
3876 switch (hdev->flow_ctl_mode) {
3877 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3878 hci_sched_acl_pkt(hdev);
3879 break;
3880
3881 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3882 hci_sched_acl_blk(hdev);
3883 break;
3884 }
3885}
3886
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003888static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889{
3890 struct hci_conn *conn;
3891 struct sk_buff *skb;
3892 int quote;
3893
3894 BT_DBG("%s", hdev->name);
3895
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003896 if (!hci_conn_num(hdev, SCO_LINK))
3897 return;
3898
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3900 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3901 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003902 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003903
3904 conn->sent++;
3905 if (conn->sent == ~0)
3906 conn->sent = 0;
3907 }
3908 }
3909}
3910
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003911static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003912{
3913 struct hci_conn *conn;
3914 struct sk_buff *skb;
3915 int quote;
3916
3917 BT_DBG("%s", hdev->name);
3918
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003919 if (!hci_conn_num(hdev, ESCO_LINK))
3920 return;
3921
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003922 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3923 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003924 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3925 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003926 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003927
3928 conn->sent++;
3929 if (conn->sent == ~0)
3930 conn->sent = 0;
3931 }
3932 }
3933}
3934
/* LE scheduler: like the ACL packet scheduler, but LE may borrow the
 * ACL buffer pool when the controller has no dedicated LE buffers
 * (hdev->le_pkts == 0), so the final count is written back to whichever
 * pool was used.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE pool when present, else the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining count back to the pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Anything sent this round? Re-balance starved channels. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3985
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003986static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003987{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003988 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989 struct sk_buff *skb;
3990
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003991 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003992 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003994 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann52de5992013-09-03 18:08:38 -07003995 /* Schedule queues and send stuff to HCI driver */
3996 hci_sched_acl(hdev);
3997 hci_sched_sco(hdev);
3998 hci_sched_esco(hdev);
3999 hci_sched_le(hdev);
4000 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004001
Linus Torvalds1da177e2005-04-16 15:20:36 -07004002 /* Send next queued raw (unknown type) packet */
4003 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004004 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005}
4006
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004007/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008
4009/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004010static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004011{
4012 struct hci_acl_hdr *hdr = (void *) skb->data;
4013 struct hci_conn *conn;
4014 __u16 handle, flags;
4015
4016 skb_pull(skb, HCI_ACL_HDR_SIZE);
4017
4018 handle = __le16_to_cpu(hdr->handle);
4019 flags = hci_flags(handle);
4020 handle = hci_handle(handle);
4021
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004022 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004023 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024
4025 hdev->stat.acl_rx++;
4026
4027 hci_dev_lock(hdev);
4028 conn = hci_conn_hash_lookup_handle(hdev, handle);
4029 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004030
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004032 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004033
Linus Torvalds1da177e2005-04-16 15:20:36 -07004034 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004035 l2cap_recv_acldata(conn, skb, flags);
4036 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004038 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004039 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004040 }
4041
4042 kfree_skb(skb);
4043}
4044
4045/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004046static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047{
4048 struct hci_sco_hdr *hdr = (void *) skb->data;
4049 struct hci_conn *conn;
4050 __u16 handle;
4051
4052 skb_pull(skb, HCI_SCO_HDR_SIZE);
4053
4054 handle = __le16_to_cpu(hdr->handle);
4055
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004056 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057
4058 hdev->stat.sco_rx++;
4059
4060 hci_dev_lock(hdev);
4061 conn = hci_conn_hash_lookup_handle(hdev, handle);
4062 hci_dev_unlock(hdev);
4063
4064 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004066 sco_recv_scodata(conn, skb);
4067 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004069 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004070 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071 }
4072
4073 kfree_skb(skb);
4074}
4075
Johan Hedberg9238f362013-03-05 20:37:48 +02004076static bool hci_req_is_complete(struct hci_dev *hdev)
4077{
4078 struct sk_buff *skb;
4079
4080 skb = skb_peek(&hdev->cmd_q);
4081 if (!skb)
4082 return true;
4083
Johan Hedberg44d27132015-11-05 09:31:40 +02004084 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
Johan Hedberg9238f362013-03-05 20:37:48 +02004085}
4086
Johan Hedberg42c6b122013-03-05 20:37:49 +02004087static void hci_resend_last(struct hci_dev *hdev)
4088{
4089 struct hci_command_hdr *sent;
4090 struct sk_buff *skb;
4091 u16 opcode;
4092
4093 if (!hdev->sent_cmd)
4094 return;
4095
4096 sent = (void *) hdev->sent_cmd->data;
4097 opcode = __le16_to_cpu(sent->opcode);
4098 if (opcode == HCI_OP_RESET)
4099 return;
4100
4101 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4102 if (!skb)
4103 return;
4104
4105 skb_queue_head(&hdev->cmd_q, skb);
4106 queue_work(hdev->workqueue, &hdev->cmd_work);
4107}
4108
/* Called when a command complete/status event arrives for @opcode.
 * Determines whether the request that command belonged to is finished
 * and, if so, hands back the request's completion callback through
 * *req_complete or *req_complete_skb. On failure of a mid-request
 * command, the rest of that request is flushed from the command queue.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Hit the start of the next request: put it back and stop */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Pick up the callback from the last command flushed */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4170
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004171static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004172{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004173 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174 struct sk_buff *skb;
4175
4176 BT_DBG("%s", hdev->name);
4177
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004179 /* Send copy to monitor */
4180 hci_send_to_monitor(hdev, skb);
4181
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182 if (atomic_read(&hdev->promisc)) {
4183 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004184 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 }
4186
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004187 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 kfree_skb(skb);
4189 continue;
4190 }
4191
4192 if (test_bit(HCI_INIT, &hdev->flags)) {
4193 /* Don't process data packets in this states. */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01004194 switch (hci_skb_pkt_type(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195 case HCI_ACLDATA_PKT:
4196 case HCI_SCODATA_PKT:
4197 kfree_skb(skb);
4198 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004199 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200 }
4201
4202 /* Process frame */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01004203 switch (hci_skb_pkt_type(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004205 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206 hci_event_packet(hdev, skb);
4207 break;
4208
4209 case HCI_ACLDATA_PKT:
4210 BT_DBG("%s ACL data packet", hdev->name);
4211 hci_acldata_packet(hdev, skb);
4212 break;
4213
4214 case HCI_SCODATA_PKT:
4215 BT_DBG("%s SCO data packet", hdev->name);
4216 hci_scodata_packet(hdev, skb);
4217 break;
4218
4219 default:
4220 kfree_skb(skb);
4221 break;
4222 }
4223 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224}
4225
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004226static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004228 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229 struct sk_buff *skb;
4230
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004231 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4232 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004235 if (atomic_read(&hdev->cmd_cnt)) {
4236 skb = skb_dequeue(&hdev->cmd_q);
4237 if (!skb)
4238 return;
4239
Wei Yongjun7585b972009-02-25 18:29:52 +08004240 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004242 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004243 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004245 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004246 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004247 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004248 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004249 schedule_delayed_work(&hdev->cmd_timer,
4250 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251 } else {
4252 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004253 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254 }
4255 }
4256}