blob: 88f1ef3589d8a6b02c88a3b11b34c56e0b2b4bbc [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
Heiner Kallweit6d5d2ee2016-01-08 19:28:58 +010043#include "leds.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020044
/* Deferred work handlers for the receive, command and transmit paths */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
59
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070060/* ---- HCI debugfs entries ---- */
61
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070062static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64{
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
Prasanna Karthik74b93e92015-11-18 12:38:41 +000068 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070069 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72}
73
/* debugfs write handler for "dut_mode": accepts a boolean string
 * ("1"/"0", "y"/"n", ...) and enables or disables Device Under Test
 * mode. Enabling issues HCI_OP_ENABLE_DUT_MODE; disabling issues an
 * HCI Reset, which is the only way a controller leaves DUT mode.
 *
 * Returns the number of bytes consumed on success or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	/* DUT mode commands require a powered-up controller */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	/* NUL-terminate the (possibly truncated) user input for parsing */
	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do when the requested state is already active */
	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Only command completion matters; the event payload is unused */
	kfree_skb(skb);

	/* Toggle the flag to reflect the new controller state */
	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
114
/* File operations backing the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
121
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200122static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
124{
125 struct hci_dev *hdev = file->private_data;
126 char buf[3];
127
Prasanna Karthik74b93e92015-11-18 12:38:41 +0000128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200129 buf[1] = '\n';
130 buf[2] = '\0';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132}
133
/* debugfs write handler for "vendor_diag": accepts a boolean string and
 * enables or disables vendor specific diagnostic messages through the
 * driver's set_diag callback. The HCI_VENDOR_DIAG flag is updated even
 * when the callback is skipped, so the setting can be (re)applied at
 * power on.
 *
 * Returns the number of bytes consumed on success or a negative errno.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	/* NUL-terminate the (possibly truncated) user input for parsing */
	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	/* Record the desired state so it survives until the next power on */
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
175
/* File operations backing the "vendor_diag" debugfs entry */
static const struct file_operations vendor_diag_fops = {
	.open = simple_open,
	.read = vendor_diag_read,
	.write = vendor_diag_write,
	.llseek = default_llseek,
};
182
Marcel Holtmannf640ee92015-10-08 12:35:42 +0200183static void hci_debugfs_create_basic(struct hci_dev *hdev)
184{
185 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
186 &dut_mode_fops);
187
188 if (hdev->set_diag)
189 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
190 &vendor_diag_fops);
191}
192
/* Queue an HCI Reset command and mark the device as resetting.
 * Used as a hci_request builder callback; always returns 0.
 */
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}
202
/* Queue the stage-one init commands for a BR/EDR controller and select
 * packet based flow control.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
216
/* Queue the stage-one init commands for an AMP controller and select
 * block based flow control.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
239
/* Queue the stage-two init commands for an AMP controller.
 * Used as a hci_request builder callback; always returns 0.
 */
static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}
251
Johan Hedberga1d01db2015-11-11 08:11:25 +0200252static int hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200253{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200254 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200255
256 BT_DBG("%s %ld", hdev->name, opt);
257
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300258 /* Reset */
259 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200260 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300261
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200262 switch (hdev->dev_type) {
263 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200264 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200265 break;
266
267 case HCI_AMP:
Johan Hedberg0af801b2015-02-17 15:05:21 +0200268 amp_init1(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200269 break;
270
271 default:
272 BT_ERR("Unknown device type %d", hdev->dev_type);
273 break;
274 }
Johan Hedberga1d01db2015-11-11 08:11:25 +0200275
276 return 0;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200277}
278
/* Queue the BR/EDR specific stage-two setup commands: buffer sizes,
 * identity information, IAC configuration, event filters and the
 * connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
310
/* Queue the LE specific stage-two setup commands and implicitly enable
 * LE on single-mode (LE-only) controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
328
/* Build and queue the Set Event Mask command. Starts from a default
 * mask (different for BR/EDR capable vs LE-only controllers) and then
 * enables additional events based on the LMP features and supported
 * commands the controller reported.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
424
/* Stage-two controller initialization: dispatch to the AMP path or run
 * the BR/EDR and LE setup helpers, then queue the remaining feature
 * dependent commands (local commands, SSP/EIR, inquiry mode, extended
 * features, link security).
 * Used as a hci_request builder callback; always returns 0.
 */
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* AMP controllers use a dedicated, much smaller second stage */
	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* Without SSP enabled, clear any stale EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
508
/* Build the default link policy from the controller's LMP capabilities
 * (role switch, hold, sniff, park) and queue the Write Default Link
 * Policy Settings command.
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
527
/* Queue Write LE Host Supported on dual-mode controllers when the host
 * LE setting differs from what the controller currently reports.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		/* Simultaneous LE and BR/EDR is deliberately not advertised */
		cp.simul = 0x00;
	}

	/* Only send the command when the setting actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
548
/* Build and queue the Set Event Mask Page 2 command, enabling the
 * Connectionless Slave Broadcast and Authenticated Payload Timeout
 * events the controller supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
580
/* Stage-three controller initialization: set the event mask, read
 * stored link keys and page scan settings where supported, configure
 * the LE event mask and LE parameters, and read the remaining extended
 * feature pages.
 * Used as a hci_request builder callback; always returns 0.
 */
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		/* Request all stored link keys, not just one address */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02; /* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01; /* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04; /* LE Connection Update
					    * Complete
					    */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08; /* LE Read Remote Used
					    * Features Complete
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}
723
/* Stage 4 of controller initialization: optional features that are
 * gated on the supported-commands bit mask reported by the controller.
 * Queues the HCI commands on @req; always returns 0.
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Wipe all stored keys (wildcard address + delete_all) */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	return 0;
}
778
/* Run the full synchronous initialization sequence for a configured
 * controller: stages 1-4 via __hci_req_sync(), then create the debugfs
 * entries appropriate for the controller type.
 *
 * Returns 0 on success or a negative error code from any stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	/* Stage 1: reset and basic capability discovery */
	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	/* Stage 2 */
	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	/* Stage 3 */
	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Stage 4 */
	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
835
Johan Hedberga1d01db2015-11-11 08:11:25 +0200836static int hci_init0_req(struct hci_request *req, unsigned long opt)
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200837{
838 struct hci_dev *hdev = req->hdev;
839
840 BT_DBG("%s %ld", hdev->name, opt);
841
842 /* Reset */
843 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
844 hci_reset_req(req, 0);
845
846 /* Read Local Version */
847 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
848
849 /* Read BD Address */
850 if (hdev->set_bdaddr)
851 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200852
853 return 0;
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200854}
855
856static int __hci_unconf_init(struct hci_dev *hdev)
857{
858 int err;
859
Marcel Holtmanncc78b442014-07-06 13:43:20 +0200860 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
861 return 0;
862
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200863 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200864 if (err < 0)
865 return err;
866
Marcel Holtmannf640ee92015-10-08 12:35:42 +0200867 if (hci_dev_test_flag(hdev, HCI_SETUP))
868 hci_debugfs_create_basic(hdev);
869
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200870 return 0;
871}
872
Johan Hedberga1d01db2015-11-11 08:11:25 +0200873static int hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874{
875 __u8 scan = opt;
876
Johan Hedberg42c6b122013-03-05 20:37:49 +0200877 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878
879 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200880 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200881 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700882}
883
Johan Hedberga1d01db2015-11-11 08:11:25 +0200884static int hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885{
886 __u8 auth = opt;
887
Johan Hedberg42c6b122013-03-05 20:37:49 +0200888 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889
890 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200891 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200892 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893}
894
Johan Hedberga1d01db2015-11-11 08:11:25 +0200895static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896{
897 __u8 encrypt = opt;
898
Johan Hedberg42c6b122013-03-05 20:37:49 +0200899 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200901 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200902 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200903 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904}
905
Johan Hedberga1d01db2015-11-11 08:11:25 +0200906static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200907{
908 __le16 policy = cpu_to_le16(opt);
909
Johan Hedberg42c6b122013-03-05 20:37:49 +0200910 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200911
912 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200913 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200914 return 0;
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200915}
916
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900917/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918 * Device is held on return. */
919struct hci_dev *hci_dev_get(int index)
920{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200921 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922
923 BT_DBG("%d", index);
924
925 if (index < 0)
926 return NULL;
927
928 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200929 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930 if (d->id == index) {
931 hdev = hci_dev_hold(d);
932 break;
933 }
934 }
935 read_unlock(&hci_dev_list_lock);
936 return hdev;
937}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938
939/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200940
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200941bool hci_discovery_active(struct hci_dev *hdev)
942{
943 struct discovery_state *discov = &hdev->discovery;
944
Andre Guedes6fbe1952012-02-03 17:47:58 -0300945 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300946 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300947 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200948 return true;
949
Andre Guedes6fbe1952012-02-03 17:47:58 -0300950 default:
951 return false;
952 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200953}
954
Johan Hedbergff9ef572012-01-04 14:23:45 +0200955void hci_discovery_set_state(struct hci_dev *hdev, int state)
956{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300957 int old_state = hdev->discovery.state;
958
Johan Hedbergff9ef572012-01-04 14:23:45 +0200959 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
960
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300961 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +0200962 return;
963
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300964 hdev->discovery.state = state;
965
Johan Hedbergff9ef572012-01-04 14:23:45 +0200966 switch (state) {
967 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -0300968 hci_update_background_scan(hdev);
969
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300970 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -0300971 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200972 break;
973 case DISCOVERY_STARTING:
974 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300975 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200976 mgmt_discovering(hdev, 1);
977 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200978 case DISCOVERY_RESOLVING:
979 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200980 case DISCOVERY_STOPPING:
981 break;
982 }
Johan Hedbergff9ef572012-01-04 14:23:45 +0200983}
984
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300985void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986{
Johan Hedberg30883512012-01-04 14:16:21 +0200987 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200988 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989
Johan Hedberg561aafb2012-01-04 13:31:59 +0200990 list_for_each_entry_safe(p, n, &cache->all, all) {
991 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200992 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200994
995 INIT_LIST_HEAD(&cache->unknown);
996 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997}
998
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300999struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1000 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001{
Johan Hedberg30883512012-01-04 14:16:21 +02001002 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003 struct inquiry_entry *e;
1004
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001005 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006
Johan Hedberg561aafb2012-01-04 13:31:59 +02001007 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001009 return e;
1010 }
1011
1012 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013}
1014
Johan Hedberg561aafb2012-01-04 13:31:59 +02001015struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001016 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001017{
Johan Hedberg30883512012-01-04 14:16:21 +02001018 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001019 struct inquiry_entry *e;
1020
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001021 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001022
1023 list_for_each_entry(e, &cache->unknown, list) {
1024 if (!bacmp(&e->data.bdaddr, bdaddr))
1025 return e;
1026 }
1027
1028 return NULL;
1029}
1030
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001031struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001032 bdaddr_t *bdaddr,
1033 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001034{
1035 struct discovery_state *cache = &hdev->discovery;
1036 struct inquiry_entry *e;
1037
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001038 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001039
1040 list_for_each_entry(e, &cache->resolve, list) {
1041 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1042 return e;
1043 if (!bacmp(&e->data.bdaddr, bdaddr))
1044 return e;
1045 }
1046
1047 return NULL;
1048}
1049
Johan Hedberga3d4e202012-01-09 00:53:02 +02001050void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001051 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001052{
1053 struct discovery_state *cache = &hdev->discovery;
1054 struct list_head *pos = &cache->resolve;
1055 struct inquiry_entry *p;
1056
1057 list_del(&ie->list);
1058
1059 list_for_each_entry(p, &cache->resolve, list) {
1060 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001061 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001062 break;
1063 pos = &p->list;
1064 }
1065
1066 list_add(&ie->list, pos);
1067}
1068
/* Insert or refresh the inquiry cache entry for the device described by
 * @data. @name_known tells whether the remote name is already known to
 * the caller.
 *
 * Returns a bit mask of MGMT_DEV_FOUND_* flags for the Device Found
 * event: LEGACY_PAIRING when either the new or the cached data reports
 * no SSP support, CONFIRM_NAME when the name still needs resolving (or
 * the entry could not be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Any stored OOB data for this device is now stale */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Honour the previously cached SSP indication as well */
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* A changed RSSI re-orders the pending resolve list */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Without a cache entry the name can never be resolved
		 * automatically, so ask userspace to confirm it.
		 */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry once the name became known (unless resolution
	 * is already in flight) and drop it from the unknown list.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1130
1131static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1132{
Johan Hedberg30883512012-01-04 14:16:21 +02001133 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134 struct inquiry_info *info = (struct inquiry_info *) buf;
1135 struct inquiry_entry *e;
1136 int copied = 0;
1137
Johan Hedberg561aafb2012-01-04 13:31:59 +02001138 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001140
1141 if (copied >= num)
1142 break;
1143
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 bacpy(&info->bdaddr, &data->bdaddr);
1145 info->pscan_rep_mode = data->pscan_rep_mode;
1146 info->pscan_period_mode = data->pscan_period_mode;
1147 info->pscan_mode = data->pscan_mode;
1148 memcpy(info->dev_class, data->dev_class, 3);
1149 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001150
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001152 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 }
1154
1155 BT_DBG("cache %p, copied %d", cache, copied);
1156 return copied;
1157}
1158
Johan Hedberga1d01db2015-11-11 08:11:25 +02001159static int hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160{
1161 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001162 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 struct hci_cp_inquiry cp;
1164
1165 BT_DBG("%s", hdev->name);
1166
1167 if (test_bit(HCI_INQUIRY, &hdev->flags))
Johan Hedberga1d01db2015-11-11 08:11:25 +02001168 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169
1170 /* Start Inquiry */
1171 memcpy(&cp.lap, &ir->lap, 3);
1172 cp.length = ir->length;
1173 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001174 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001175
1176 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177}
1178
1179int hci_inquiry(void __user *arg)
1180{
1181 __u8 __user *ptr = arg;
1182 struct hci_inquiry_req ir;
1183 struct hci_dev *hdev;
1184 int err = 0, do_inquiry = 0, max_rsp;
1185 long timeo;
1186 __u8 *buf;
1187
1188 if (copy_from_user(&ir, ptr, sizeof(ir)))
1189 return -EFAULT;
1190
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001191 hdev = hci_dev_get(ir.dev_id);
1192 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193 return -ENODEV;
1194
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001195 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001196 err = -EBUSY;
1197 goto done;
1198 }
1199
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001200 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001201 err = -EOPNOTSUPP;
1202 goto done;
1203 }
1204
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001205 if (hdev->dev_type != HCI_BREDR) {
1206 err = -EOPNOTSUPP;
1207 goto done;
1208 }
1209
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001210 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001211 err = -EOPNOTSUPP;
1212 goto done;
1213 }
1214
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001215 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001216 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001217 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001218 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 do_inquiry = 1;
1220 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001221 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
Marcel Holtmann04837f62006-07-03 10:02:33 +02001223 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001224
1225 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001226 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001227 timeo, NULL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001228 if (err < 0)
1229 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001230
1231 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1232 * cleared). If it is interrupted by a signal, return -EINTR.
1233 */
NeilBrown74316202014-07-07 15:16:04 +10001234 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001235 TASK_INTERRUPTIBLE))
1236 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001237 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001239 /* for unlimited number of responses we will use buffer with
1240 * 255 entries
1241 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1243
1244 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1245 * copy it to the user space.
1246 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001247 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001248 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 err = -ENOMEM;
1250 goto done;
1251 }
1252
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001253 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001255 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256
1257 BT_DBG("num_rsp %d", ir.num_rsp);
1258
1259 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1260 ptr += sizeof(ir);
1261 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001262 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001264 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 err = -EFAULT;
1266
1267 kfree(buf);
1268
1269done:
1270 hci_dev_put(hdev);
1271 return err;
1272}
1273
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001274static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276 int ret = 0;
1277
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 BT_DBG("%s %p", hdev->name, hdev);
1279
Johan Hedbergb5044302015-11-10 09:44:55 +02001280 hci_req_sync_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001282 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001283 ret = -ENODEV;
1284 goto done;
1285 }
1286
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001287 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1288 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001289 /* Check for rfkill but allow the HCI setup stage to
1290 * proceed (which in itself doesn't cause any RF activity).
1291 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001292 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001293 ret = -ERFKILL;
1294 goto done;
1295 }
1296
1297 /* Check for valid public address or a configured static
1298 * random adddress, but let the HCI setup proceed to
1299 * be able to determine if there is a public address
1300 * or not.
1301 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001302 * In case of user channel usage, it is not important
1303 * if a public address or static random address is
1304 * available.
1305 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001306 * This check is only valid for BR/EDR controllers
1307 * since AMP controllers do not have an address.
1308 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001309 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001310 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001311 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1312 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1313 ret = -EADDRNOTAVAIL;
1314 goto done;
1315 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001316 }
1317
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 if (test_bit(HCI_UP, &hdev->flags)) {
1319 ret = -EALREADY;
1320 goto done;
1321 }
1322
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 if (hdev->open(hdev)) {
1324 ret = -EIO;
1325 goto done;
1326 }
1327
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001328 set_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001329 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001330
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001331 atomic_set(&hdev->cmd_cnt, 1);
1332 set_bit(HCI_INIT, &hdev->flags);
1333
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001334 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmanne131d742015-10-20 02:30:47 +02001335 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1336
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001337 if (hdev->setup)
1338 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001339
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001340 /* The transport driver can set these quirks before
1341 * creating the HCI device or in its setup callback.
1342 *
1343 * In case any of them is set, the controller has to
1344 * start up as unconfigured.
1345 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001346 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1347 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001348 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001349
1350 /* For an unconfigured controller it is required to
1351 * read at least the version information provided by
1352 * the Read Local Version Information command.
1353 *
1354 * If the set_bdaddr driver callback is provided, then
1355 * also the original Bluetooth public device address
1356 * will be read using the Read BD Address command.
1357 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001358 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001359 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001360 }
1361
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001362 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001363 /* If public address change is configured, ensure that
1364 * the address gets programmed. If the driver does not
1365 * support changing the public address, fail the power
1366 * on procedure.
1367 */
1368 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1369 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001370 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1371 else
1372 ret = -EADDRNOTAVAIL;
1373 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001374
1375 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001376 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001377 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001378 ret = __hci_init(hdev);
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001379 if (!ret && hdev->post_init)
1380 ret = hdev->post_init(hdev);
1381 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 }
1383
Marcel Holtmann7e995b92015-10-17 16:00:26 +02001384 /* If the HCI Reset command is clearing all diagnostic settings,
1385 * then they need to be reprogrammed after the init procedure
1386 * completed.
1387 */
1388 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1389 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1390 ret = hdev->set_diag(hdev, true);
1391
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001392 clear_bit(HCI_INIT, &hdev->flags);
1393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 if (!ret) {
1395 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001396 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 set_bit(HCI_UP, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001398 hci_sock_dev_event(hdev, HCI_DEV_UP);
Heiner Kallweit6d5d2ee2016-01-08 19:28:58 +01001399 hci_leds_update_powered(hdev, true);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001400 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1401 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1402 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1403 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Johan Hedberg2ff13892015-11-25 16:15:44 +02001404 hci_dev_test_flag(hdev, HCI_MGMT) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001405 hdev->dev_type == HCI_BREDR) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02001406 ret = __hci_req_hci_power_on(hdev);
1407 mgmt_power_on(hdev, ret);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001408 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001409 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001411 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001412 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001413 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414
1415 skb_queue_purge(&hdev->cmd_q);
1416 skb_queue_purge(&hdev->rx_q);
1417
1418 if (hdev->flush)
1419 hdev->flush(hdev);
1420
1421 if (hdev->sent_cmd) {
1422 kfree_skb(hdev->sent_cmd);
1423 hdev->sent_cmd = NULL;
1424 }
1425
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001426 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001427 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001428
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001430 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 }
1432
1433done:
Johan Hedbergb5044302015-11-10 09:44:55 +02001434 hci_req_sync_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 return ret;
1436}
1437
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001438/* ---- HCI ioctl helpers ---- */
1439
/* Power on the HCI device with the given index.
 *
 * Grabs a reference on the device for the duration of the call and
 * returns 0 on success or a negative errno. The actual bring-up is
 * delegated to hci_dev_do_open(); this wrapper only performs the
 * ioctl-path policy checks beforehand.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1494
/* Drop all pending LE connection actions and their connection
 * references.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Release both the usage and the object reference that
		 * the params entry holds on its connection before
		 * forgetting the pointer.
		 */
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
1511
/* Bring down an HCI device: stop all pending work, flush queues and
 * caches, notify the management interface and monitor sockets, and
 * finally call the driver's close callback.
 *
 * Always returns 0. Safe to call on a device that is already down
 * (the HCI_UP test below short-circuits in that case).
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	/* If the device was not up, only the command timer needs to be
	 * stopped; nothing else below applies.
	 */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1630
1631int hci_dev_close(__u16 dev)
1632{
1633 struct hci_dev *hdev;
1634 int err;
1635
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001636 hdev = hci_dev_get(dev);
1637 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001639
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001640 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001641 err = -EBUSY;
1642 goto done;
1643 }
1644
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001645 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001646 cancel_delayed_work(&hdev->power_off);
1647
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001649
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001650done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 hci_dev_put(hdev);
1652 return err;
1653}
1654
/* Perform the actual controller reset: drop queued traffic, flush the
 * inquiry cache and connection hash, then issue an HCI Reset command
 * synchronously.
 *
 * Returns the result of the synchronous reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the flow control counters back to their defaults */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1688
Marcel Holtmann5c912492015-01-28 11:53:05 -08001689int hci_dev_reset(__u16 dev)
1690{
1691 struct hci_dev *hdev;
1692 int err;
1693
1694 hdev = hci_dev_get(dev);
1695 if (!hdev)
1696 return -ENODEV;
1697
1698 if (!test_bit(HCI_UP, &hdev->flags)) {
1699 err = -ENETDOWN;
1700 goto done;
1701 }
1702
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001703 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001704 err = -EBUSY;
1705 goto done;
1706 }
1707
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001708 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001709 err = -EOPNOTSUPP;
1710 goto done;
1711 }
1712
1713 err = hci_dev_do_reset(hdev);
1714
1715done:
1716 hci_dev_put(hdev);
1717 return err;
1718}
1719
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720int hci_dev_reset_stat(__u16 dev)
1721{
1722 struct hci_dev *hdev;
1723 int ret = 0;
1724
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001725 hdev = hci_dev_get(dev);
1726 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 return -ENODEV;
1728
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001729 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001730 ret = -EBUSY;
1731 goto done;
1732 }
1733
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001734 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001735 ret = -EOPNOTSUPP;
1736 goto done;
1737 }
1738
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1740
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001741done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 return ret;
1744}
1745
/* Mirror a raw HCI scan-enable change (made via ioctl, not mgmt) into
 * the HCI_CONNECTABLE/HCI_DISCOVERABLE device flags and, when the
 * management interface is active, emit the corresponding settings
 * update.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* The test-and-set/clear helpers report the previous state, so
	 * *_changed is true only when the flag actually flipped.
	 */
	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable implies discoverable; drop it
		 * along with the main flag when inquiry scan goes off.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
1781
/* Handle the HCISET* device-control ioctls.
 *
 * Copies a struct hci_dev_req from userspace, validates that the
 * device is a usable BR/EDR controller (not a user channel, not
 * unconfigured, BR/EDR enabled), then applies the requested setting.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs the MTU in the upper and the packet
		 * count in the lower 16 bits.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1883
/* Handle the HCIGETDEVLIST ioctl: copy the ids and flags of up to
 * dev_num registered controllers to userspace.
 *
 * Returns 0 on success, -EFAULT on a failed user copy, -EINVAL for a
 * zero or oversized dev_num, or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to at most two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1933
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * requested device and copy it back to userspace.
 *
 * Returns 0 on success, -EFAULT on a failed user copy or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields and zero out the SCO ones.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1986
1987/* ---- Interface to HCI drivers ---- */
1988
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001989static int hci_rfkill_set_block(void *data, bool blocked)
1990{
1991 struct hci_dev *hdev = data;
1992
1993 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1994
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001995 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001996 return -EBUSY;
1997
Johan Hedberg5e130362013-09-13 08:58:17 +03001998 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001999 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002000 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2001 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002002 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002003 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002004 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002005 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002006
2007 return 0;
2008}
2009
/* rfkill operations registered for each HCI controller */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2013
/* Deferred power-on work handler.
 *
 * Opens the controller and, depending on the setup/config state,
 * re-checks conditions that were ignored during setup (rfkill, missing
 * addresses), arms the auto power-off timer and announces the index to
 * the management interface.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* A device that is already up and managed only needs the
	 * power-on request replayed, not a full open.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2084
/* Deferred power-off work item.  Queued on hdev->req_workqueue (for
 * example by the HCI_AUTO_OFF delayed work, see hci_power_on above) so
 * the controller is shut down from process context.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2094
/* Deferred hardware-error recovery work item.
 *
 * Reports the stored hardware error code - through the driver's
 * hw_error() hook when one is registered, otherwise via BT_ERR - and
 * then power-cycles the controller by closing and re-opening it.  If
 * the close fails, the re-open is skipped.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	/* Give the driver first chance to handle its own error */
	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	/* Non-zero return means the device could not be closed cleanly;
	 * do not try to bring it back up in that state.
	 */
	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2112
Johan Hedberg35f74982014-02-18 17:14:32 +02002113void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002114{
Johan Hedberg48210022013-01-27 00:31:28 +02002115 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002116
Johan Hedberg48210022013-01-27 00:31:28 +02002117 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2118 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002119 kfree(uuid);
2120 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002121}
2122
/* Free all stored BR/EDR link keys.
 *
 * Entries are unlinked with list_del_rcu() and released via kfree_rcu(),
 * so concurrent RCU readers (e.g. hci_find_link_key()) remain safe until
 * the grace period ends; the iterator itself stays valid because freeing
 * is deferred.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
2132
/* Free all stored LE long term keys.
 *
 * Same RCU-deferred deletion pattern as hci_link_keys_clear(): readers
 * traversing hdev->long_term_keys under rcu_read_lock() stay safe until
 * the grace period completes.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2142
/* Free all stored identity resolving keys (IRKs), using the same
 * RCU-deferred deletion pattern as the link key and LTK clears.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2152
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002153struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2154{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002155 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002156
Johan Hedberg0378b592014-11-19 15:22:22 +02002157 rcu_read_lock();
2158 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2159 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2160 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002161 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002162 }
2163 }
2164 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002165
2166 return NULL;
2167}
2168
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently (survive the connection) or treated as temporary.
 *
 * The checks encode a security policy and are order-sensitive: key type
 * is examined first, then the bonding requirements negotiated on @conn.
 * @old_key_type is 0xff when no previous key existed.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key (types 0x00-0x02) - always kept */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case - no connection object to consult */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2208
Johan Hedberge804d252014-07-16 11:42:28 +03002209static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002210{
Johan Hedberge804d252014-07-16 11:42:28 +03002211 if (type == SMP_LTK)
2212 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002213
Johan Hedberge804d252014-07-16 11:42:28 +03002214 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002215}
2216
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002217struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2218 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002219{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002220 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002221
Johan Hedberg970d0f12014-11-13 14:37:47 +02002222 rcu_read_lock();
2223 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002224 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2225 continue;
2226
Johan Hedberg923e2412014-12-03 12:43:39 +02002227 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002228 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002229 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002230 }
2231 }
2232 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002233
2234 return NULL;
2235}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002236
/* Resolve a Resolvable Private Address to its stored IRK.
 *
 * Two passes under a single rcu_read_lock():
 *  1. Fast path - compare @rpa against the RPA last resolved for each
 *     IRK (cached in irk->rpa).
 *  2. Slow path - run the cryptographic match via smp_irk_matches(),
 *     and on success cache @rpa on the IRK to speed up the next lookup.
 *
 * Returns the matching IRK, or NULL if @rpa resolves to no stored key.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2260
2261struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2262 u8 addr_type)
2263{
2264 struct smp_irk *irk;
2265
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002266 /* Identity Address must be public or static random */
2267 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2268 return NULL;
2269
Johan Hedbergadae20c2014-11-13 14:37:48 +02002270 rcu_read_lock();
2271 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002272 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002273 bacmp(bdaddr, &irk->bdaddr) == 0) {
2274 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002275 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002276 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002277 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002278 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002279
2280 return NULL;
2281}
2282
/* Store (or update in place) the BR/EDR link key for @bdaddr.
 *
 * If @persistent is non-NULL it is set to whether the key should be
 * stored permanently (see hci_persistent_key()).  Returns the stored
 * key, or NULL on allocation failure.
 *
 * NOTE(review): a new key is linked into hdev->link_keys before its
 * fields are filled in; writers appear to be serialized externally
 * (list_add_rcu with no locking here) - confirm callers hold hdev->lock.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the pre-existing type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2329
/* Store (or update in place) an LE long term key for @bdaddr/@addr_type.
 *
 * The role used for the lookup is derived from @type via ltk_role(), so
 * an existing key for the same role is overwritten rather than
 * duplicated.  Returns the stored key, or NULL on allocation failure.
 *
 * NOTE(review): as with hci_add_link_key(), a new entry is linked into
 * the list before its fields are set - confirm callers hold hdev->lock.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
2358
/* Store (or refresh) an identity resolving key for @bdaddr/@addr_type.
 *
 * If no IRK exists for the identity address a new entry is allocated
 * and linked into hdev->identity_resolving_keys; either way the key
 * value and the last-seen RPA are (re)written.  Returns the IRK, or
 * NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
2381
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002382int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2383{
2384 struct link_key *key;
2385
2386 key = hci_find_link_key(hdev, bdaddr);
2387 if (!key)
2388 return -ENOENT;
2389
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002390 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002391
Johan Hedberg0378b592014-11-19 15:22:22 +02002392 list_del_rcu(&key->list);
2393 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002394
2395 return 0;
2396}
2397
/* Delete all LE long term keys stored for @bdaddr/@bdaddr_type (there
 * can be one per role).  Uses RCU-deferred freeing, so the iteration
 * stays valid while entries are removed.
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2416
/* Delete any identity resolving key stored for @bdaddr/@addr_type.
 * Silent no-op when nothing matches; freeing is RCU-deferred.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2431
/* Check whether any pairing key is stored for @bdaddr of the given
 * mgmt address @type.
 *
 * BR/EDR reduces to a link-key lookup.  For LE, the address is first
 * resolved through a matching IRK - so a resolvable private address is
 * mapped back to its identity address - and the LTK list is then
 * searched for that identity.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Redirect the lookup to the identity address when an IRK maps
	 * this address to a known identity.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2467
Ville Tervo6bd32322011-02-16 16:32:41 +02002468/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002469static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002470{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002471 struct hci_dev *hdev = container_of(work, struct hci_dev,
2472 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002473
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002474 if (hdev->sent_cmd) {
2475 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2476 u16 opcode = __le16_to_cpu(sent->opcode);
2477
2478 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2479 } else {
2480 BT_ERR("%s command tx timeout", hdev->name);
2481 }
2482
Ville Tervo6bd32322011-02-16 16:32:41 +02002483 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002484 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002485}
2486
Szymon Janc2763eda2011-03-22 13:12:22 +01002487struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002488 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002489{
2490 struct oob_data *data;
2491
Johan Hedberg6928a922014-10-26 20:46:09 +01002492 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2493 if (bacmp(bdaddr, &data->bdaddr) != 0)
2494 continue;
2495 if (data->bdaddr_type != bdaddr_type)
2496 continue;
2497 return data;
2498 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002499
2500 return NULL;
2501}
2502
Johan Hedberg6928a922014-10-26 20:46:09 +01002503int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2504 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002505{
2506 struct oob_data *data;
2507
Johan Hedberg6928a922014-10-26 20:46:09 +01002508 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002509 if (!data)
2510 return -ENOENT;
2511
Johan Hedberg6928a922014-10-26 20:46:09 +01002512 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002513
2514 list_del(&data->list);
2515 kfree(data);
2516
2517 return 0;
2518}
2519
Johan Hedberg35f74982014-02-18 17:14:32 +02002520void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002521{
2522 struct oob_data *data, *n;
2523
2524 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2525 list_del(&data->list);
2526 kfree(data);
2527 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002528}
2529
/* Store (or update) remote out-of-band pairing data for
 * @bdaddr/@bdaddr_type.
 *
 * Each of the two hash/rand pairs is only valid when both halves are
 * non-NULL; invalid pairs are zeroed out.  data->present is rebuilt as
 * a bitmask of the valid pairs: 0x01 = only the 192 pair, 0x02 = only
 * the 256 pair, 0x03 = both, 0x00 = neither.  Note the assignment order
 * below is what makes the mask come out right - the 0x01 case is set in
 * the second if/else once the 256 pair is known to be absent.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2575
Florian Grandeld2609b32015-06-18 03:16:34 +02002576/* This function requires the caller holds hdev->lock */
2577struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2578{
2579 struct adv_info *adv_instance;
2580
2581 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2582 if (adv_instance->instance == instance)
2583 return adv_instance;
2584 }
2585
2586 return NULL;
2587}
2588
2589/* This function requires the caller holds hdev->lock */
Prasanna Karthik74b93e92015-11-18 12:38:41 +00002590struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2591{
Florian Grandeld2609b32015-06-18 03:16:34 +02002592 struct adv_info *cur_instance;
2593
2594 cur_instance = hci_find_adv_instance(hdev, instance);
2595 if (!cur_instance)
2596 return NULL;
2597
2598 if (cur_instance == list_last_entry(&hdev->adv_instances,
2599 struct adv_info, list))
2600 return list_first_entry(&hdev->adv_instances,
2601 struct adv_info, list);
2602 else
2603 return list_next_entry(cur_instance, list);
2604}
2605
2606/* This function requires the caller holds hdev->lock */
2607int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2608{
2609 struct adv_info *adv_instance;
2610
2611 adv_instance = hci_find_adv_instance(hdev, instance);
2612 if (!adv_instance)
2613 return -ENOENT;
2614
2615 BT_DBG("%s removing %dMR", hdev->name, instance);
2616
Johan Hedbergcab054a2015-11-30 11:21:45 +02002617 if (hdev->cur_adv_instance == instance) {
2618 if (hdev->adv_instance_timeout) {
2619 cancel_delayed_work(&hdev->adv_instance_expire);
2620 hdev->adv_instance_timeout = 0;
2621 }
2622 hdev->cur_adv_instance = 0x00;
Florian Grandel5d900e42015-06-18 03:16:35 +02002623 }
2624
Florian Grandeld2609b32015-06-18 03:16:34 +02002625 list_del(&adv_instance->list);
2626 kfree(adv_instance);
2627
2628 hdev->adv_instance_cnt--;
2629
2630 return 0;
2631}
2632
2633/* This function requires the caller holds hdev->lock */
2634void hci_adv_instances_clear(struct hci_dev *hdev)
2635{
2636 struct adv_info *adv_instance, *n;
2637
Florian Grandel5d900e42015-06-18 03:16:35 +02002638 if (hdev->adv_instance_timeout) {
2639 cancel_delayed_work(&hdev->adv_instance_expire);
2640 hdev->adv_instance_timeout = 0;
2641 }
2642
Florian Grandeld2609b32015-06-18 03:16:34 +02002643 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2644 list_del(&adv_instance->list);
2645 kfree(adv_instance);
2646 }
2647
2648 hdev->adv_instance_cnt = 0;
Johan Hedbergcab054a2015-11-30 11:21:45 +02002649 hdev->cur_adv_instance = 0x00;
Florian Grandeld2609b32015-06-18 03:16:34 +02002650}
2651
2652/* This function requires the caller holds hdev->lock */
2653int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2654 u16 adv_data_len, u8 *adv_data,
2655 u16 scan_rsp_len, u8 *scan_rsp_data,
2656 u16 timeout, u16 duration)
2657{
2658 struct adv_info *adv_instance;
2659
2660 adv_instance = hci_find_adv_instance(hdev, instance);
2661 if (adv_instance) {
2662 memset(adv_instance->adv_data, 0,
2663 sizeof(adv_instance->adv_data));
2664 memset(adv_instance->scan_rsp_data, 0,
2665 sizeof(adv_instance->scan_rsp_data));
2666 } else {
2667 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2668 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2669 return -EOVERFLOW;
2670
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002671 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002672 if (!adv_instance)
2673 return -ENOMEM;
2674
Florian Grandelfffd38b2015-06-18 03:16:47 +02002675 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002676 adv_instance->instance = instance;
2677 list_add(&adv_instance->list, &hdev->adv_instances);
2678 hdev->adv_instance_cnt++;
2679 }
2680
2681 adv_instance->flags = flags;
2682 adv_instance->adv_data_len = adv_data_len;
2683 adv_instance->scan_rsp_len = scan_rsp_len;
2684
2685 if (adv_data_len)
2686 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2687
2688 if (scan_rsp_len)
2689 memcpy(adv_instance->scan_rsp_data,
2690 scan_rsp_data, scan_rsp_len);
2691
2692 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002693 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002694
2695 if (duration == 0)
2696 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2697 else
2698 adv_instance->duration = duration;
2699
2700 BT_DBG("%s for %dMR", hdev->name, instance);
2701
2702 return 0;
2703}
2704
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002705struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002706 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002707{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002708 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002709
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002710 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002711 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002712 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002713 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002714
2715 return NULL;
2716}
2717
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002718void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002719{
Geliang Tang7eb74042015-12-18 23:33:25 +08002720 struct bdaddr_list *b, *n;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002721
Geliang Tang7eb74042015-12-18 23:33:25 +08002722 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2723 list_del(&b->list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002724 kfree(b);
2725 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002726}
2727
/* Add @bdaddr/@type to @list.
 * Returns 0 on success, -EBADF when @bdaddr is the BDADDR_ANY wildcard
 * (never a valid member), -EEXIST for duplicates, or -ENOMEM on
 * allocation failure.
 */
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}
2749
/* Remove @bdaddr/@type from @list.
 * BDADDR_ANY acts as a wildcard and flushes the entire list (always
 * returning 0).  Otherwise returns 0 on success or -ENOENT when the
 * entry does not exist.
 */
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
2768
Andre Guedes15819a72014-02-03 13:56:18 -03002769/* This function requires the caller holds hdev->lock */
2770struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2771 bdaddr_t *addr, u8 addr_type)
2772{
2773 struct hci_conn_params *params;
2774
2775 list_for_each_entry(params, &hdev->le_conn_params, list) {
2776 if (bacmp(&params->addr, addr) == 0 &&
2777 params->addr_type == addr_type) {
2778 return params;
2779 }
2780 }
2781
2782 return NULL;
2783}
2784
2785/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002786struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2787 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002788{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002789 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002790
Johan Hedberg501f8822014-07-04 12:37:26 +03002791 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002792 if (bacmp(&param->addr, addr) == 0 &&
2793 param->addr_type == addr_type)
2794 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002795 }
2796
2797 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002798}
2799
2800/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002801struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2802 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002803{
2804 struct hci_conn_params *params;
2805
2806 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002807 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002808 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002809
2810 params = kzalloc(sizeof(*params), GFP_KERNEL);
2811 if (!params) {
2812 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002813 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002814 }
2815
2816 bacpy(&params->addr, addr);
2817 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002818
2819 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002820 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002821
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002822 params->conn_min_interval = hdev->le_conn_min_interval;
2823 params->conn_max_interval = hdev->le_conn_max_interval;
2824 params->conn_latency = hdev->le_conn_latency;
2825 params->supervision_timeout = hdev->le_supv_timeout;
2826 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2827
2828 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2829
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002830 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002831}
2832
/* Release a single hci_conn_params entry.
 *
 * If a connection object is attached, the references taken on it are
 * dropped first. The entry is then unlinked from both its action list
 * and hdev->le_conn_params before being freed.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2844
/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	/* Removing an entry may change which devices passive scanning
	 * should be running for, so re-evaluate it.
	 */
	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
2860
/* Drop all connection parameter entries whose auto-connect policy is
 * HCI_AUTO_CONN_DISABLED.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2884
/* Free every LE connection parameter entry on hdev->le_conn_params.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}
2895
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead
 * (HCI_FORCE_STATIC_ADDR).
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 *
 * @bdaddr and @bdaddr_type are output parameters; @bdaddr_type is set
 * to ADDR_LE_DEV_RANDOM or ADDR_LE_DEV_PUBLIC accordingly.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
2923
/* Alloc HCI device.
 *
 * Allocates and initializes a struct hci_dev with default parameters,
 * empty lists, work items and queues. The caller later registers it
 * with hci_register_dev() and releases it with hci_free_dev().
 * Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Basic BR/EDR defaults */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults (values in controller units; see Bluetooth Core
	 * spec for the unit of each parameter)
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists (keys, filter lists, pending LE work, ...) */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	/* Deferred work handlers for the RX/TX/command paths */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3011
/* Free HCI device.
 *
 * Drops the device reference; the actual memory is released through
 * the driver-model release callback of hdev->dev.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3019
/* Register HCI device.
 *
 * Allocates an index, creates the workqueues, sysfs/debugfs entries
 * and rfkill switch, links the device into hci_dev_list and schedules
 * the initial power-on. Returns the assigned index (>= 0) on success
 * or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must provide at least these three callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	/* NOTE(review): if device_add() fails, the debugfs directory
	 * created above is not removed on this error path — verify
	 * whether this leaks a debugfs entry.
	 */
	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		/* rfkill is optional; registration failure is not fatal */
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3125
/* Unregister HCI device.
 *
 * Tears down everything set up by hci_register_dev(): unlinks the
 * device from hci_dev_list, closes it, notifies mgmt/sockets, removes
 * sysfs/debugfs/rfkill, destroys the workqueues, clears all per-device
 * state lists and finally drops the reference and releases the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Remember the index; hdev may be freed before it is released */
	id = hdev->id;

	hci_leds_exit(hdev);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt if the device finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Clear all remaining per-device state under hdev->lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3192
/* Suspend HCI device.
 *
 * Only notifies monitoring sockets of the suspend event; no hardware
 * state is changed here. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3200
/* Resume HCI device.
 *
 * Only notifies monitoring sockets of the resume event; no hardware
 * state is changed here. Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3208
Marcel Holtmann75e05692014-11-02 08:15:38 +01003209/* Reset HCI device */
3210int hci_reset_dev(struct hci_dev *hdev)
3211{
3212 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3213 struct sk_buff *skb;
3214
3215 skb = bt_skb_alloc(3, GFP_ATOMIC);
3216 if (!skb)
3217 return -ENOMEM;
3218
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003219 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
Marcel Holtmann75e05692014-11-02 08:15:38 +01003220 memcpy(skb_put(skb, 3), hw_err, 3);
3221
3222 /* Send Hardware Error to upper stack */
3223 return hci_recv_frame(hdev, skb);
3224}
3225EXPORT_SYMBOL(hci_reset_dev);
3226
Marcel Holtmann76bca882009-11-18 00:40:39 +01003227/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003228int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003229{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003230 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003231 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003232 kfree_skb(skb);
3233 return -ENXIO;
3234 }
3235
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003236 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3237 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3238 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
Marcel Holtmannfe806dc2015-10-08 03:14:28 +02003239 kfree_skb(skb);
3240 return -EINVAL;
3241 }
3242
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003243 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003244 bt_cb(skb)->incoming = 1;
3245
3246 /* Time stamp */
3247 __net_timestamp(skb);
3248
Marcel Holtmann76bca882009-11-18 00:40:39 +01003249 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003250 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003251
Marcel Holtmann76bca882009-11-18 00:40:39 +01003252 return 0;
3253}
3254EXPORT_SYMBOL(hci_recv_frame);
3255
/* Receive diagnostic message from HCI drivers.
 *
 * Tags @skb as a diagnostic packet and queues it on the RX path.
 * Always returns 0.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
3271
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272/* ---- Interface to upper protocols ---- */
3273
/* Register an upper-protocol callback structure.
 *
 * Callbacks are appended to hci_cb_list, so they are invoked in
 * registration order. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3285
/* Unregister an upper-protocol callback structure previously added
 * with hci_register_cb(). Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3297
/* Hand one outgoing frame to the driver.
 *
 * Copies go to the monitor socket and (when in promiscuous mode) to
 * the raw sockets before the skb is orphaned and passed to
 * hdev->send(). Consumes @skb in all cases, including when the device
 * is not running or the driver rejects the frame.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop the frame if the transport is not running anymore */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
3330
/* Send HCI command.
 *
 * Builds a command skb for @opcode with @plen bytes of @param, marks
 * it as a stand-alone request and queues it on the command queue.
 * Returns 0 on success or -ENOMEM if the skb cannot be built.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter payload of hdev->sent_cmd if its
 * opcode matches @opcode, otherwise NULL. The pointer aliases the
 * skb's data and is only valid while sent_cmd is unchanged.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3373
/* Send HCI command and wait for command complete event.
 *
 * Serialized via the request lock. Returns the event skb on success,
 * or an ERR_PTR (-ENETDOWN if the device is not up, or the error from
 * __hci_cmd_sync()).
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
3392
/* Send ACL data */
/* Prepend an ACL header (handle+flags, payload length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3405
/* Queue an ACL packet (and any fragments on its frag_list) on @queue
 * with ACL headers added.
 *
 * The first fragment keeps @flags; subsequent fragments are re-flagged
 * as ACL_CONT. All fragments are queued atomically under the queue
 * lock.
 *
 * NOTE(review): fragments always use conn->handle, while the head
 * packet uses chan->handle for HCI_AMP devices — confirm this
 * asymmetry is intentional for AMP links.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	/* The handle used in the ACL header depends on the transport */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3467
3468void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3469{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003470 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003471
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003472 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003473
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003474 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003476 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478
3479/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003480void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481{
3482 struct hci_dev *hdev = conn->hdev;
3483 struct hci_sco_hdr hdr;
3484
3485 BT_DBG("%s len %d", hdev->name, skb->len);
3486
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003487 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488 hdr.dlen = skb->len;
3489
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003490 skb_push(skb, HCI_SCO_HDR_SIZE);
3491 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003492 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003494 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003495
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003497 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499
3500/* ---- HCI TX task (outgoing data) ---- */
3501
3502/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003503static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3504 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505{
3506 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003507 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003508 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003510 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003512
3513 rcu_read_lock();
3514
3515 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003516 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003518
3519 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3520 continue;
3521
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522 num++;
3523
3524 if (c->sent < min) {
3525 min = c->sent;
3526 conn = c;
3527 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003528
3529 if (hci_conn_num(hdev, type) == num)
3530 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531 }
3532
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003533 rcu_read_unlock();
3534
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003536 int cnt, q;
3537
3538 switch (conn->type) {
3539 case ACL_LINK:
3540 cnt = hdev->acl_cnt;
3541 break;
3542 case SCO_LINK:
3543 case ESCO_LINK:
3544 cnt = hdev->sco_cnt;
3545 break;
3546 case LE_LINK:
3547 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3548 break;
3549 default:
3550 cnt = 0;
3551 BT_ERR("Unknown link type");
3552 }
3553
3554 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 *quote = q ? q : 1;
3556 } else
3557 *quote = 0;
3558
3559 BT_DBG("conn %p quote %d", conn, *quote);
3560 return conn;
3561}
3562
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003563static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564{
3565 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003566 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567
Ville Tervobae1f5d92011-02-10 22:38:53 -03003568 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003570 rcu_read_lock();
3571
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003573 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003574 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003575 BT_ERR("%s killing stalled connection %pMR",
3576 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003577 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578 }
3579 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003580
3581 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582}
3583
/* HCI channel scheduler: pick the channel that should transmit next.
 *
 * Only the head packet of each channel's data queue is considered.
 * The highest skb->priority wins; ties are broken in favour of the
 * connection with the fewest packets currently in flight (conn->sent).
 * *quote is set to the number of packets the caller may send: the
 * controller's credits for this link type split evenly between the
 * contending channels, minimum 1.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Found a higher priority level: restart
				 * the fairness bookkeeping at that level.
				 */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the credit pool that matches the winning link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3665
/* Promote the priority of starved channels.
 *
 * After a scheduling round, any channel of @type that sent nothing
 * (chan->sent == 0) but still has queued data gets its head packet
 * bumped to HCI_PRIO_MAX - 1 so it is preferred next round; channels
 * that did send simply have their per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to transmit: reset and move on */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3715
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003716static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3717{
3718 /* Calculate count of blocks used by this packet */
3719 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3720}
3721
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003722static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003724 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725 /* ACL tx timeout must be longer than maximum
3726 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003727 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003728 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003729 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003731}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732
/* Packet-based ACL scheduling: drain channel queues into the driver
 * for as long as the controller's packet credits (hdev->acl_cnt) last,
 * respecting per-channel priority.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link before trying to send */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3770
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003771static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003772{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003773 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003774 struct hci_chan *chan;
3775 struct sk_buff *skb;
3776 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003777 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003778
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003779 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003780
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003781 BT_DBG("%s", hdev->name);
3782
3783 if (hdev->dev_type == HCI_AMP)
3784 type = AMP_LINK;
3785 else
3786 type = ACL_LINK;
3787
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003788 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003789 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003790 u32 priority = (skb_peek(&chan->data_q))->priority;
3791 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3792 int blocks;
3793
3794 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003795 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003796
3797 /* Stop if priority has changed */
3798 if (skb->priority < priority)
3799 break;
3800
3801 skb = skb_dequeue(&chan->data_q);
3802
3803 blocks = __get_blocks(hdev, skb);
3804 if (blocks > hdev->block_cnt)
3805 return;
3806
3807 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003808 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003809
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003810 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003811 hdev->acl_last_tx = jiffies;
3812
3813 hdev->block_cnt -= blocks;
3814 quote -= blocks;
3815
3816 chan->sent += blocks;
3817 chan->conn->sent += blocks;
3818 }
3819 }
3820
3821 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003822 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003823}
3824
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003825static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003826{
3827 BT_DBG("%s", hdev->name);
3828
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003829 /* No ACL link over BR/EDR controller */
3830 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3831 return;
3832
3833 /* No AMP link over AMP controller */
3834 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003835 return;
3836
3837 switch (hdev->flow_ctl_mode) {
3838 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3839 hci_sched_acl_pkt(hdev);
3840 break;
3841
3842 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3843 hci_sched_acl_blk(hdev);
3844 break;
3845 }
3846}
3847
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	/* Round-robin over SCO connections while credits remain */
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the in-flight counter at its maximum */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3871
/* Schedule eSCO: identical to hci_sched_sco() but for ESCO_LINK
 * connections (which share the SCO credit pool, hdev->sco_cnt).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the in-flight counter at its maximum */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3895
/* Schedule LE: drain LE channel queues in priority order.  If the
 * controller has no dedicated LE buffers (le_pkts == 0), LE traffic
 * shares the ACL credit pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	/* Unconfigured controllers are exempt from the watchdog */
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Fall back to the ACL pool if there are no LE-only buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool they
	 * came from.
	 */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3946
/* TX worker: run every per-link-type scheduler, then flush raw
 * (unknown type) packets straight to the driver.  Scheduling is
 * skipped entirely while the device is bound to a user channel, since
 * userspace then owns the controller.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
3967
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field also carries the packet boundary and
	 * broadcast flags; split them apart.
	 */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol; L2CAP takes skb ownership */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	/* No matching connection: drop the packet */
	kfree_skb(skb);
}
4005
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol; SCO layer takes skb ownership */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	/* No matching connection: drop the packet */
	kfree_skb(skb);
}
4036
Johan Hedberg9238f362013-03-05 20:37:48 +02004037static bool hci_req_is_complete(struct hci_dev *hdev)
4038{
4039 struct sk_buff *skb;
4040
4041 skb = skb_peek(&hdev->cmd_q);
4042 if (!skb)
4043 return true;
4044
Johan Hedberg44d27132015-11-05 09:31:40 +02004045 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
Johan Hedberg9238f362013-03-05 20:37:48 +02004046}
4047
Johan Hedberg42c6b122013-03-05 20:37:49 +02004048static void hci_resend_last(struct hci_dev *hdev)
4049{
4050 struct hci_command_hdr *sent;
4051 struct sk_buff *skb;
4052 u16 opcode;
4053
4054 if (!hdev->sent_cmd)
4055 return;
4056
4057 sent = (void *) hdev->sent_cmd->data;
4058 opcode = __le16_to_cpu(sent->opcode);
4059 if (opcode == HCI_OP_RESET)
4060 return;
4061
4062 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4063 if (!skb)
4064 return;
4065
4066 skb_queue_head(&hdev->cmd_q, skb);
4067 queue_work(hdev->workqueue, &hdev->cmd_work);
4068}
4069
/* Resolve the completion callback(s) for a finished HCI request.
 *
 * Called when a command-complete/status event for @opcode arrives.
 * On completion of the whole request, either hands back the callback
 * stored on hdev->sent_cmd via *req_complete / *req_complete_skb, or
 * — on failure mid-request — walks the command queue, reporting the
 * callback of and discarding every remaining command of this request.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A command flagged HCI_REQ_START begins the *next*
		 * request: put it back and stop.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4131
/* RX worker: drain hdev->rx_q, mirroring each packet to the monitor
 * and (in promiscuous mode) to raw sockets, then dispatch it to the
 * matching packet handler.  Data packets are dropped while the device
 * is still initializing or owned by a user channel.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Userspace owns the device: kernel processing stops here */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4186
/* Command worker: send the next queued HCI command when the controller
 * has a free command credit, keep a clone in hdev->sent_cmd for
 * completion matching, and (re)arm the command timeout.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset the timeout is suppressed; the
			 * reset-complete event restarts the machinery.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}