/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
43
/* Deferred work handlers for RX, command and TX processing (defined below) */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
58
/* ---- HCI debugfs entries ---- */
60
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070061static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
62 size_t count, loff_t *ppos)
63{
64 struct hci_dev *hdev = file->private_data;
65 char buf[3];
66
Prasanna Karthik74b93e92015-11-18 12:38:41 +000067 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070068 buf[1] = '\n';
69 buf[2] = '\0';
70 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
71}
72
73static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
74 size_t count, loff_t *ppos)
75{
76 struct hci_dev *hdev = file->private_data;
77 struct sk_buff *skb;
78 char buf[32];
79 size_t buf_size = min(count, (sizeof(buf)-1));
80 bool enable;
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070081
82 if (!test_bit(HCI_UP, &hdev->flags))
83 return -ENETDOWN;
84
85 if (copy_from_user(buf, user_buf, buf_size))
86 return -EFAULT;
87
88 buf[buf_size] = '\0';
89 if (strtobool(buf, &enable))
90 return -EINVAL;
91
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -070092 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070093 return -EALREADY;
94
Johan Hedbergb5044302015-11-10 09:44:55 +020095 hci_req_sync_lock(hdev);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070096 if (enable)
97 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
98 HCI_CMD_TIMEOUT);
99 else
100 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
101 HCI_CMD_TIMEOUT);
Johan Hedbergb5044302015-11-10 09:44:55 +0200102 hci_req_sync_unlock(hdev);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700103
104 if (IS_ERR(skb))
105 return PTR_ERR(skb);
106
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700107 kfree_skb(skb);
108
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -0700109 hci_dev_change_flag(hdev, HCI_DUT_MODE);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700110
111 return count;
112}
113
114static const struct file_operations dut_mode_fops = {
115 .open = simple_open,
116 .read = dut_mode_read,
117 .write = dut_mode_write,
118 .llseek = default_llseek,
119};
120
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200121static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
122 size_t count, loff_t *ppos)
123{
124 struct hci_dev *hdev = file->private_data;
125 char buf[3];
126
Prasanna Karthik74b93e92015-11-18 12:38:41 +0000127 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200128 buf[1] = '\n';
129 buf[2] = '\0';
130 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
131}
132
133static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
134 size_t count, loff_t *ppos)
135{
136 struct hci_dev *hdev = file->private_data;
137 char buf[32];
138 size_t buf_size = min(count, (sizeof(buf)-1));
139 bool enable;
140 int err;
141
142 if (copy_from_user(buf, user_buf, buf_size))
143 return -EFAULT;
144
145 buf[buf_size] = '\0';
146 if (strtobool(buf, &enable))
147 return -EINVAL;
148
Marcel Holtmann7e995b92015-10-17 16:00:26 +0200149 /* When the diagnostic flags are not persistent and the transport
150 * is not active, then there is no need for the vendor callback.
151 *
152 * Instead just store the desired value. If needed the setting
153 * will be programmed when the controller gets powered on.
154 */
155 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
156 !test_bit(HCI_RUNNING, &hdev->flags))
157 goto done;
158
Johan Hedbergb5044302015-11-10 09:44:55 +0200159 hci_req_sync_lock(hdev);
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200160 err = hdev->set_diag(hdev, enable);
Johan Hedbergb5044302015-11-10 09:44:55 +0200161 hci_req_sync_unlock(hdev);
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200162
163 if (err < 0)
164 return err;
165
Marcel Holtmann7e995b92015-10-17 16:00:26 +0200166done:
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200167 if (enable)
168 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
169 else
170 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
171
172 return count;
173}
174
175static const struct file_operations vendor_diag_fops = {
176 .open = simple_open,
177 .read = vendor_diag_read,
178 .write = vendor_diag_write,
179 .llseek = default_llseek,
180};
181
Marcel Holtmannf640ee92015-10-08 12:35:42 +0200182static void hci_debugfs_create_basic(struct hci_dev *hdev)
183{
184 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
185 &dut_mode_fops);
186
187 if (hdev->set_diag)
188 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
189 &vendor_diag_fops);
190}
191
Johan Hedberga1d01db2015-11-11 08:11:25 +0200192static int hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200194 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195
196 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200197 set_bit(HCI_RESET, &req->hdev->flags);
198 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200199 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200}
201
Johan Hedberg42c6b122013-03-05 20:37:49 +0200202static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200204 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200205
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200207 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200209 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200210 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200211
212 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200213 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214}
215
Johan Hedberg0af801b2015-02-17 15:05:21 +0200216static void amp_init1(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200217{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200218 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200219
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200220 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200221 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300222
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700223 /* Read Local Supported Commands */
224 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
225
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300226 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200227 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300228
229 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200230 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700231
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700232 /* Read Flow Control Mode */
233 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
234
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700235 /* Read Location Data */
236 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200237}
238
Johan Hedberga1d01db2015-11-11 08:11:25 +0200239static int amp_init2(struct hci_request *req)
Johan Hedberg0af801b2015-02-17 15:05:21 +0200240{
241 /* Read Local Supported Features. Not all AMP controllers
242 * support this so it's placed conditionally in the second
243 * stage init.
244 */
245 if (req->hdev->commands[14] & 0x20)
246 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200247
248 return 0;
Johan Hedberg0af801b2015-02-17 15:05:21 +0200249}
250
Johan Hedberga1d01db2015-11-11 08:11:25 +0200251static int hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200252{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200253 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200254
255 BT_DBG("%s %ld", hdev->name, opt);
256
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300257 /* Reset */
258 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200259 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300260
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200261 switch (hdev->dev_type) {
262 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200263 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200264 break;
265
266 case HCI_AMP:
Johan Hedberg0af801b2015-02-17 15:05:21 +0200267 amp_init1(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200268 break;
269
270 default:
271 BT_ERR("Unknown device type %d", hdev->dev_type);
272 break;
273 }
Johan Hedberga1d01db2015-11-11 08:11:25 +0200274
275 return 0;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200276}
277
Johan Hedberg42c6b122013-03-05 20:37:49 +0200278static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200279{
Johan Hedberg2177bab2013-03-05 20:37:43 +0200280 __le16 param;
281 __u8 flt_type;
282
283 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200284 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200285
286 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200287 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200288
289 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200290 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200291
292 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200293 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200294
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700295 /* Read Number of Supported IAC */
296 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
297
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700298 /* Read Current IAC LAP */
299 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
300
Johan Hedberg2177bab2013-03-05 20:37:43 +0200301 /* Clear Event Filters */
302 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200303 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200304
305 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700306 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200307 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200308}
309
Johan Hedberg42c6b122013-03-05 20:37:49 +0200310static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200311{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300312 struct hci_dev *hdev = req->hdev;
313
Johan Hedberg2177bab2013-03-05 20:37:43 +0200314 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200315 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200316
317 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200318 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200319
Marcel Holtmann747d3f02014-02-27 20:37:29 -0800320 /* Read LE Supported States */
321 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
322
Johan Hedbergc73eee92013-04-19 18:35:21 +0300323 /* LE-only controllers have LE implicitly enabled */
324 if (!lmp_bredr_capable(hdev))
Marcel Holtmanna1536da2015-03-13 02:11:01 -0700325 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200326}
327
Johan Hedberg42c6b122013-03-05 20:37:49 +0200328static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200329{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200330 struct hci_dev *hdev = req->hdev;
331
Johan Hedberg2177bab2013-03-05 20:37:43 +0200332 /* The second byte is 0xff instead of 0x9f (two reserved bits
333 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
334 * command otherwise.
335 */
336 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
337
338 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
339 * any event mask for pre 1.2 devices.
340 */
341 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
342 return;
343
344 if (lmp_bredr_capable(hdev)) {
345 events[4] |= 0x01; /* Flow Specification Complete */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700346 } else {
347 /* Use a different default for LE-only devices */
348 memset(events, 0, sizeof(events));
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700349 events[1] |= 0x20; /* Command Complete */
350 events[1] |= 0x40; /* Command Status */
351 events[1] |= 0x80; /* Hardware Error */
Marcel Holtmann5c3d3b42015-11-04 07:17:23 +0100352
353 /* If the controller supports the Disconnect command, enable
354 * the corresponding event. In addition enable packet flow
355 * control related events.
356 */
357 if (hdev->commands[0] & 0x20) {
358 events[0] |= 0x10; /* Disconnection Complete */
359 events[2] |= 0x04; /* Number of Completed Packets */
360 events[3] |= 0x02; /* Data Buffer Overflow */
361 }
362
363 /* If the controller supports the Read Remote Version
364 * Information command, enable the corresponding event.
365 */
366 if (hdev->commands[2] & 0x80)
367 events[1] |= 0x08; /* Read Remote Version Information
368 * Complete
369 */
Marcel Holtmann0da71f12014-07-12 23:36:16 +0200370
371 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
372 events[0] |= 0x80; /* Encryption Change */
373 events[5] |= 0x80; /* Encryption Key Refresh Complete */
374 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200375 }
376
Marcel Holtmann9fe759c2015-11-01 09:45:22 +0100377 if (lmp_inq_rssi_capable(hdev) ||
378 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
Johan Hedberg2177bab2013-03-05 20:37:43 +0200379 events[4] |= 0x02; /* Inquiry Result with RSSI */
380
Marcel Holtmann70f56aa2015-11-01 09:39:49 +0100381 if (lmp_ext_feat_capable(hdev))
382 events[4] |= 0x04; /* Read Remote Extended Features Complete */
383
384 if (lmp_esco_capable(hdev)) {
385 events[5] |= 0x08; /* Synchronous Connection Complete */
386 events[5] |= 0x10; /* Synchronous Connection Changed */
387 }
388
Johan Hedberg2177bab2013-03-05 20:37:43 +0200389 if (lmp_sniffsubr_capable(hdev))
390 events[5] |= 0x20; /* Sniff Subrating */
391
392 if (lmp_pause_enc_capable(hdev))
393 events[5] |= 0x80; /* Encryption Key Refresh Complete */
394
395 if (lmp_ext_inq_capable(hdev))
396 events[5] |= 0x40; /* Extended Inquiry Result */
397
398 if (lmp_no_flush_capable(hdev))
399 events[7] |= 0x01; /* Enhanced Flush Complete */
400
401 if (lmp_lsto_capable(hdev))
402 events[6] |= 0x80; /* Link Supervision Timeout Changed */
403
404 if (lmp_ssp_capable(hdev)) {
405 events[6] |= 0x01; /* IO Capability Request */
406 events[6] |= 0x02; /* IO Capability Response */
407 events[6] |= 0x04; /* User Confirmation Request */
408 events[6] |= 0x08; /* User Passkey Request */
409 events[6] |= 0x10; /* Remote OOB Data Request */
410 events[6] |= 0x20; /* Simple Pairing Complete */
411 events[7] |= 0x04; /* User Passkey Notification */
412 events[7] |= 0x08; /* Keypress Notification */
413 events[7] |= 0x10; /* Remote Host Supported
414 * Features Notification
415 */
416 }
417
418 if (lmp_le_capable(hdev))
419 events[7] |= 0x20; /* LE Meta-Event */
420
Johan Hedberg42c6b122013-03-05 20:37:49 +0200421 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200422}
423
Johan Hedberga1d01db2015-11-11 08:11:25 +0200424static int hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200425{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200426 struct hci_dev *hdev = req->hdev;
427
Johan Hedberg0af801b2015-02-17 15:05:21 +0200428 if (hdev->dev_type == HCI_AMP)
429 return amp_init2(req);
430
Johan Hedberg2177bab2013-03-05 20:37:43 +0200431 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200432 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +0300433 else
Marcel Holtmanna358dc12015-03-13 02:11:02 -0700434 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200435
436 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200437 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200438
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100439 /* All Bluetooth 1.2 and later controllers should support the
440 * HCI command for reading the local supported commands.
441 *
442 * Unfortunately some controllers indicate Bluetooth 1.2 support,
443 * but do not have support for this command. If that is the case,
444 * the driver can quirk the behavior and skip reading the local
445 * supported commands.
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300446 */
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100447 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
448 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200449 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200450
451 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -0700452 /* When SSP is available, then the host features page
453 * should also be available as well. However some
454 * controllers list the max_page as 0 as long as SSP
455 * has not been enabled. To achieve proper debugging
456 * output, force the minimum max_page to 1 at least.
457 */
458 hdev->max_page = 0x01;
459
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700460 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200461 u8 mode = 0x01;
Marcel Holtmann574ea3c2015-01-22 11:15:20 -0800462
Johan Hedberg42c6b122013-03-05 20:37:49 +0200463 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
464 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200465 } else {
466 struct hci_cp_write_eir cp;
467
468 memset(hdev->eir, 0, sizeof(hdev->eir));
469 memset(&cp, 0, sizeof(cp));
470
Johan Hedberg42c6b122013-03-05 20:37:49 +0200471 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200472 }
473 }
474
Marcel Holtmann043ec9b2015-01-02 23:35:19 -0800475 if (lmp_inq_rssi_capable(hdev) ||
476 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
Marcel Holtmann04422da2015-01-02 23:35:18 -0800477 u8 mode;
478
479 /* If Extended Inquiry Result events are supported, then
480 * they are clearly preferred over Inquiry Result with RSSI
481 * events.
482 */
483 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
484
485 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
486 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200487
488 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200490
491 if (lmp_ext_feat_capable(hdev)) {
492 struct hci_cp_read_local_ext_features cp;
493
494 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
496 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200497 }
498
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700499 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200500 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
502 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200503 }
Johan Hedberga1d01db2015-11-11 08:11:25 +0200504
505 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200506}
507
Johan Hedberg42c6b122013-03-05 20:37:49 +0200508static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200509{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200510 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200511 struct hci_cp_write_def_link_policy cp;
512 u16 link_policy = 0;
513
514 if (lmp_rswitch_capable(hdev))
515 link_policy |= HCI_LP_RSWITCH;
516 if (lmp_hold_capable(hdev))
517 link_policy |= HCI_LP_HOLD;
518 if (lmp_sniff_capable(hdev))
519 link_policy |= HCI_LP_SNIFF;
520 if (lmp_park_capable(hdev))
521 link_policy |= HCI_LP_PARK;
522
523 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200524 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200525}
526
Johan Hedberg42c6b122013-03-05 20:37:49 +0200527static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200528{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200529 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200530 struct hci_cp_write_le_host_supported cp;
531
Johan Hedbergc73eee92013-04-19 18:35:21 +0300532 /* LE-only devices do not support explicit enablement */
533 if (!lmp_bredr_capable(hdev))
534 return;
535
Johan Hedberg2177bab2013-03-05 20:37:43 +0200536 memset(&cp, 0, sizeof(cp));
537
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700538 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200539 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +0200540 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200541 }
542
543 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200544 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
545 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200546}
547
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300548static void hci_set_event_mask_page_2(struct hci_request *req)
549{
550 struct hci_dev *hdev = req->hdev;
551 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
552
553 /* If Connectionless Slave Broadcast master role is supported
554 * enable all necessary events for it.
555 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800556 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300557 events[1] |= 0x40; /* Triggered Clock Capture */
558 events[1] |= 0x80; /* Synchronization Train Complete */
559 events[2] |= 0x10; /* Slave Page Response Timeout */
560 events[2] |= 0x20; /* CSB Channel Map Change */
561 }
562
563 /* If Connectionless Slave Broadcast slave role is supported
564 * enable all necessary events for it.
565 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800566 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300567 events[2] |= 0x01; /* Synchronization Train Received */
568 events[2] |= 0x02; /* CSB Receive */
569 events[2] |= 0x04; /* CSB Timeout */
570 events[2] |= 0x08; /* Truncated Page Complete */
571 }
572
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800573 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +0200574 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800575 events[2] |= 0x80;
576
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300577 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
578}
579
Johan Hedberga1d01db2015-11-11 08:11:25 +0200580static int hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200581{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200582 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300583 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200584
Marcel Holtmann0da71f12014-07-12 23:36:16 +0200585 hci_setup_event_mask(req);
586
Johan Hedberge81be902015-08-30 21:47:20 +0300587 if (hdev->commands[6] & 0x20 &&
588 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Marcel Holtmann48ce62c2015-01-12 09:21:26 -0800589 struct hci_cp_read_stored_link_key cp;
590
591 bacpy(&cp.bdaddr, BDADDR_ANY);
592 cp.read_all = 0x01;
593 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
594 }
595
Johan Hedberg2177bab2013-03-05 20:37:43 +0200596 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200597 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200598
Marcel Holtmann417287d2014-12-11 20:21:54 +0100599 if (hdev->commands[8] & 0x01)
600 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
601
602 /* Some older Broadcom based Bluetooth 1.2 controllers do not
603 * support the Read Page Scan Type command. Check support for
604 * this command in the bit mask of supported commands.
605 */
606 if (hdev->commands[13] & 0x01)
607 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
608
Andre Guedes9193c6e2014-07-01 18:10:09 -0300609 if (lmp_le_capable(hdev)) {
610 u8 events[8];
611
612 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +0200613
614 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
615 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -0300616
617 /* If controller supports the Connection Parameters Request
618 * Link Layer Procedure, enable the corresponding event.
619 */
620 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
621 events[0] |= 0x20; /* LE Remote Connection
622 * Parameter Request
623 */
624
Marcel Holtmanna9f60682014-12-20 16:28:39 +0100625 /* If the controller supports the Data Length Extension
626 * feature, enable the corresponding event.
627 */
628 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
629 events[0] |= 0x40; /* LE Data Length Change */
630
Marcel Holtmann4b71bba2014-12-05 16:20:12 +0100631 /* If the controller supports Extended Scanner Filter
632 * Policies, enable the correspondig event.
633 */
634 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
635 events[1] |= 0x04; /* LE Direct Advertising
636 * Report
637 */
638
Marcel Holtmann7d26f5c2015-11-01 09:39:51 +0100639 /* If the controller supports the LE Set Scan Enable command,
640 * enable the corresponding advertising report event.
641 */
642 if (hdev->commands[26] & 0x08)
643 events[0] |= 0x02; /* LE Advertising Report */
644
645 /* If the controller supports the LE Create Connection
646 * command, enable the corresponding event.
647 */
648 if (hdev->commands[26] & 0x10)
649 events[0] |= 0x01; /* LE Connection Complete */
650
651 /* If the controller supports the LE Connection Update
652 * command, enable the corresponding event.
653 */
654 if (hdev->commands[27] & 0x04)
655 events[0] |= 0x04; /* LE Connection Update
656 * Complete
657 */
658
659 /* If the controller supports the LE Read Remote Used Features
660 * command, enable the corresponding event.
661 */
662 if (hdev->commands[27] & 0x20)
663 events[0] |= 0x08; /* LE Read Remote Used
664 * Features Complete
665 */
666
Marcel Holtmann5a34bd52014-12-05 16:20:15 +0100667 /* If the controller supports the LE Read Local P-256
668 * Public Key command, enable the corresponding event.
669 */
670 if (hdev->commands[34] & 0x02)
671 events[0] |= 0x80; /* LE Read Local P-256
672 * Public Key Complete
673 */
674
675 /* If the controller supports the LE Generate DHKey
676 * command, enable the corresponding event.
677 */
678 if (hdev->commands[34] & 0x04)
679 events[1] |= 0x01; /* LE Generate DHKey Complete */
680
Andre Guedes9193c6e2014-07-01 18:10:09 -0300681 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
682 events);
683
Marcel Holtmann15a49cc2014-07-12 23:20:50 +0200684 if (hdev->commands[25] & 0x40) {
685 /* Read LE Advertising Channel TX Power */
686 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
687 }
688
Marcel Holtmann2ab216a2015-11-01 09:39:48 +0100689 if (hdev->commands[26] & 0x40) {
690 /* Read LE White List Size */
691 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
692 0, NULL);
693 }
694
695 if (hdev->commands[26] & 0x80) {
696 /* Clear LE White List */
697 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
698 }
699
Marcel Holtmanna9f60682014-12-20 16:28:39 +0100700 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
701 /* Read LE Maximum Data Length */
702 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
703
704 /* Read LE Suggested Default Data Length */
705 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
706 }
707
Johan Hedberg42c6b122013-03-05 20:37:49 +0200708 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -0300709 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300710
711 /* Read features beyond page 1 if available */
712 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
713 struct hci_cp_read_local_ext_features cp;
714
715 cp.page = p;
716 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
717 sizeof(cp), &cp);
718 }
Johan Hedberga1d01db2015-11-11 08:11:25 +0200719
720 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200721}
722
/* Fourth stage of controller initialization: conditional commands
 * whose availability depends on the supported-commands bit mask or
 * on LMP features read during the earlier init stages. Always
 * returns 0; the queued commands run asynchronously.
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Wildcard address plus delete_all wipes every stored key */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	return 0;
}
777
/* Run the full controller initialization sequence (stages 1-4) for a
 * configured controller and, while in setup or config phase, create
 * the per-controller debugfs entries. AMP controllers stop after
 * stage 2. Returns 0 on success or a negative errno from any stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	/* Stage 1 */
	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Basic debugfs entries are created only once, during setup */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	/* Stage 2 */
	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	/* Stage 3 */
	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Stage 4 */
	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
834
Johan Hedberga1d01db2015-11-11 08:11:25 +0200835static int hci_init0_req(struct hci_request *req, unsigned long opt)
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200836{
837 struct hci_dev *hdev = req->hdev;
838
839 BT_DBG("%s %ld", hdev->name, opt);
840
841 /* Reset */
842 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
843 hci_reset_req(req, 0);
844
845 /* Read Local Version */
846 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
847
848 /* Read BD Address */
849 if (hdev->set_bdaddr)
850 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200851
852 return 0;
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200853}
854
/* Minimal initialization for a controller in unconfigured state: run
 * only the stage 0 request (reset, local version, optional BD_ADDR
 * read). Skipped entirely for raw devices. Returns 0 on success or a
 * negative errno.
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	/* Raw devices are driven entirely from userspace; do nothing */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Basic debugfs entries are created only once, during setup */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
871
Johan Hedberga1d01db2015-11-11 08:11:25 +0200872static int hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700873{
874 __u8 scan = opt;
875
Johan Hedberg42c6b122013-03-05 20:37:49 +0200876 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877
878 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200879 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200880 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881}
882
Johan Hedberga1d01db2015-11-11 08:11:25 +0200883static int hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700884{
885 __u8 auth = opt;
886
Johan Hedberg42c6b122013-03-05 20:37:49 +0200887 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700888
889 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200890 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200891 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892}
893
Johan Hedberga1d01db2015-11-11 08:11:25 +0200894static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700895{
896 __u8 encrypt = opt;
897
Johan Hedberg42c6b122013-03-05 20:37:49 +0200898 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200900 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200901 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200902 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903}
904
Johan Hedberga1d01db2015-11-11 08:11:25 +0200905static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200906{
907 __le16 policy = cpu_to_le16(opt);
908
Johan Hedberg42c6b122013-03-05 20:37:49 +0200909 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200910
911 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200912 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200913 return 0;
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200914}
915
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900916/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917 * Device is held on return. */
918struct hci_dev *hci_dev_get(int index)
919{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200920 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700921
922 BT_DBG("%d", index);
923
924 if (index < 0)
925 return NULL;
926
927 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200928 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929 if (d->id == index) {
930 hdev = hci_dev_hold(d);
931 break;
932 }
933 }
934 read_unlock(&hci_dev_list_lock);
935 return hdev;
936}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937
938/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200939
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200940bool hci_discovery_active(struct hci_dev *hdev)
941{
942 struct discovery_state *discov = &hdev->discovery;
943
Andre Guedes6fbe1952012-02-03 17:47:58 -0300944 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300945 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300946 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200947 return true;
948
Andre Guedes6fbe1952012-02-03 17:47:58 -0300949 default:
950 return false;
951 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200952}
953
/* Transition the discovery state machine to @state and notify the
 * management interface about discovery start/stop where appropriate.
 * No-op when the state does not actually change.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Discovery no longer needs the radio; re-evaluate
		 * background scanning.
		 */
		hci_update_background_scan(hdev);

		/* mgmt_discovering(hdev, 1) is only sent on entering
		 * FINDING, so a STARTING -> STOPPED transition must not
		 * send the corresponding stop notification.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
983
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300984void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985{
Johan Hedberg30883512012-01-04 14:16:21 +0200986 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200987 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988
Johan Hedberg561aafb2012-01-04 13:31:59 +0200989 list_for_each_entry_safe(p, n, &cache->all, all) {
990 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200991 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200993
994 INIT_LIST_HEAD(&cache->unknown);
995 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996}
997
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300998struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
999 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000{
Johan Hedberg30883512012-01-04 14:16:21 +02001001 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001002 struct inquiry_entry *e;
1003
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001004 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005
Johan Hedberg561aafb2012-01-04 13:31:59 +02001006 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001007 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001008 return e;
1009 }
1010
1011 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012}
1013
Johan Hedberg561aafb2012-01-04 13:31:59 +02001014struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001015 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001016{
Johan Hedberg30883512012-01-04 14:16:21 +02001017 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001018 struct inquiry_entry *e;
1019
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001020 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001021
1022 list_for_each_entry(e, &cache->unknown, list) {
1023 if (!bacmp(&e->data.bdaddr, bdaddr))
1024 return e;
1025 }
1026
1027 return NULL;
1028}
1029
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001030struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001031 bdaddr_t *bdaddr,
1032 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001033{
1034 struct discovery_state *cache = &hdev->discovery;
1035 struct inquiry_entry *e;
1036
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001037 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001038
1039 list_for_each_entry(e, &cache->resolve, list) {
1040 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1041 return e;
1042 if (!bacmp(&e->data.bdaddr, bdaddr))
1043 return e;
1044 }
1045
1046 return NULL;
1047}
1048
/* Re-position @ie on the name-resolve list, which is kept ordered by
 * ascending abs(rssi) so that devices with the strongest signal get
 * their names resolved first.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the scan below finds the new insertion point */
	list_del(&ie->list);

	/* Skip entries whose resolution is already in flight
	 * (NAME_PENDING); stop at the first entry with a weaker or
	 * equal signal (larger or equal abs(rssi)) and insert before it.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1067
/* Add or refresh the inquiry cache entry for the device described by
 * @data. @name_known indicates whether the remote name is already
 * known. Returns MGMT_DEV_FOUND_* flags for the device-found event:
 * legacy-pairing and/or confirm-name indications.
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Seeing the device again invalidates stored OOB data for it */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	/* No SSP support means legacy (PIN based) pairing */
	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* A changed RSSI may move the entry within the ordered
		 * resolve list when it still awaits name resolution.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* No memory to track the entry: ask userspace to
		 * confirm the name instead.
		 */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known for an existing entry: take it off the
	 * unknown list, unless resolution is already in flight.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	/* Refresh the cached data and both timestamps */
	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1129
1130static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1131{
Johan Hedberg30883512012-01-04 14:16:21 +02001132 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 struct inquiry_info *info = (struct inquiry_info *) buf;
1134 struct inquiry_entry *e;
1135 int copied = 0;
1136
Johan Hedberg561aafb2012-01-04 13:31:59 +02001137 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001139
1140 if (copied >= num)
1141 break;
1142
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 bacpy(&info->bdaddr, &data->bdaddr);
1144 info->pscan_rep_mode = data->pscan_rep_mode;
1145 info->pscan_period_mode = data->pscan_period_mode;
1146 info->pscan_mode = data->pscan_mode;
1147 memcpy(info->dev_class, data->dev_class, 3);
1148 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001149
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001151 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152 }
1153
1154 BT_DBG("cache %p, copied %d", cache, copied);
1155 return copied;
1156}
1157
Johan Hedberga1d01db2015-11-11 08:11:25 +02001158static int hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159{
1160 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001161 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 struct hci_cp_inquiry cp;
1163
1164 BT_DBG("%s", hdev->name);
1165
1166 if (test_bit(HCI_INQUIRY, &hdev->flags))
Johan Hedberga1d01db2015-11-11 08:11:25 +02001167 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168
1169 /* Start Inquiry */
1170 memcpy(&cp.lap, &ir->lap, 3);
1171 cp.length = ir->length;
1172 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001173 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001174
1175 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176}
1177
1178int hci_inquiry(void __user *arg)
1179{
1180 __u8 __user *ptr = arg;
1181 struct hci_inquiry_req ir;
1182 struct hci_dev *hdev;
1183 int err = 0, do_inquiry = 0, max_rsp;
1184 long timeo;
1185 __u8 *buf;
1186
1187 if (copy_from_user(&ir, ptr, sizeof(ir)))
1188 return -EFAULT;
1189
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001190 hdev = hci_dev_get(ir.dev_id);
1191 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 return -ENODEV;
1193
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001194 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001195 err = -EBUSY;
1196 goto done;
1197 }
1198
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001199 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001200 err = -EOPNOTSUPP;
1201 goto done;
1202 }
1203
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001204 if (hdev->dev_type != HCI_BREDR) {
1205 err = -EOPNOTSUPP;
1206 goto done;
1207 }
1208
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001209 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001210 err = -EOPNOTSUPP;
1211 goto done;
1212 }
1213
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001214 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001215 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001216 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001217 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218 do_inquiry = 1;
1219 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001220 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221
Marcel Holtmann04837f62006-07-03 10:02:33 +02001222 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001223
1224 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001225 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001226 timeo, NULL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001227 if (err < 0)
1228 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001229
1230 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1231 * cleared). If it is interrupted by a signal, return -EINTR.
1232 */
NeilBrown74316202014-07-07 15:16:04 +10001233 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001234 TASK_INTERRUPTIBLE))
1235 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001236 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001238 /* for unlimited number of responses we will use buffer with
1239 * 255 entries
1240 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1242
1243 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1244 * copy it to the user space.
1245 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001246 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001247 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248 err = -ENOMEM;
1249 goto done;
1250 }
1251
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001252 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001254 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255
1256 BT_DBG("num_rsp %d", ir.num_rsp);
1257
1258 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1259 ptr += sizeof(ir);
1260 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001261 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001263 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 err = -EFAULT;
1265
1266 kfree(buf);
1267
1268done:
1269 hci_dev_put(hdev);
1270 return err;
1271}
1272
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001273static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 int ret = 0;
1276
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277 BT_DBG("%s %p", hdev->name, hdev);
1278
Johan Hedbergb5044302015-11-10 09:44:55 +02001279 hci_req_sync_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001281 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001282 ret = -ENODEV;
1283 goto done;
1284 }
1285
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001286 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1287 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001288 /* Check for rfkill but allow the HCI setup stage to
1289 * proceed (which in itself doesn't cause any RF activity).
1290 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001291 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001292 ret = -ERFKILL;
1293 goto done;
1294 }
1295
1296 /* Check for valid public address or a configured static
1297 * random adddress, but let the HCI setup proceed to
1298 * be able to determine if there is a public address
1299 * or not.
1300 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001301 * In case of user channel usage, it is not important
1302 * if a public address or static random address is
1303 * available.
1304 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001305 * This check is only valid for BR/EDR controllers
1306 * since AMP controllers do not have an address.
1307 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001308 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001309 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001310 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1311 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1312 ret = -EADDRNOTAVAIL;
1313 goto done;
1314 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001315 }
1316
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317 if (test_bit(HCI_UP, &hdev->flags)) {
1318 ret = -EALREADY;
1319 goto done;
1320 }
1321
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 if (hdev->open(hdev)) {
1323 ret = -EIO;
1324 goto done;
1325 }
1326
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001327 set_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001328 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001329
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001330 atomic_set(&hdev->cmd_cnt, 1);
1331 set_bit(HCI_INIT, &hdev->flags);
1332
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001333 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmanne131d742015-10-20 02:30:47 +02001334 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1335
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001336 if (hdev->setup)
1337 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001338
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001339 /* The transport driver can set these quirks before
1340 * creating the HCI device or in its setup callback.
1341 *
1342 * In case any of them is set, the controller has to
1343 * start up as unconfigured.
1344 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001345 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1346 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001347 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001348
1349 /* For an unconfigured controller it is required to
1350 * read at least the version information provided by
1351 * the Read Local Version Information command.
1352 *
1353 * If the set_bdaddr driver callback is provided, then
1354 * also the original Bluetooth public device address
1355 * will be read using the Read BD Address command.
1356 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001357 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001358 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001359 }
1360
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001361 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001362 /* If public address change is configured, ensure that
1363 * the address gets programmed. If the driver does not
1364 * support changing the public address, fail the power
1365 * on procedure.
1366 */
1367 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1368 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001369 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1370 else
1371 ret = -EADDRNOTAVAIL;
1372 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001373
1374 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001375 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001376 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001377 ret = __hci_init(hdev);
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001378 if (!ret && hdev->post_init)
1379 ret = hdev->post_init(hdev);
1380 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 }
1382
Marcel Holtmann7e995b92015-10-17 16:00:26 +02001383 /* If the HCI Reset command is clearing all diagnostic settings,
1384 * then they need to be reprogrammed after the init procedure
1385 * completed.
1386 */
1387 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1388 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1389 ret = hdev->set_diag(hdev, true);
1390
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001391 clear_bit(HCI_INIT, &hdev->flags);
1392
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 if (!ret) {
1394 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001395 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 set_bit(HCI_UP, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001397 hci_sock_dev_event(hdev, HCI_DEV_UP);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001398 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1399 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1400 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1401 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001402 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001403 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001404 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001405 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001406 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001407 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001409 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001410 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001411 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412
1413 skb_queue_purge(&hdev->cmd_q);
1414 skb_queue_purge(&hdev->rx_q);
1415
1416 if (hdev->flush)
1417 hdev->flush(hdev);
1418
1419 if (hdev->sent_cmd) {
1420 kfree_skb(hdev->sent_cmd);
1421 hdev->sent_cmd = NULL;
1422 }
1423
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001424 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001425 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001426
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001428 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 }
1430
1431done:
Johan Hedbergb5044302015-11-10 09:44:55 +02001432 hci_req_sync_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 return ret;
1434}
1435
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001436/* ---- HCI ioctl helpers ---- */
1437
/* Power on the controller identified by the device index @dev.
 *
 * This is the legacy ioctl / mgmt entry point: it resolves the index to
 * a reference-counted hci_dev, applies policy checks that only apply to
 * this path and then defers to hci_dev_do_open() for the actual bring-up.
 *
 * Returns 0 on success or a negative errno (-ENODEV if the index does
 * not resolve, -EOPNOTSUPP for unconfigured non-user-channel devices,
 * or whatever hci_dev_do_open() reports).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	/* Balance the hci_dev_get() above */
	hci_dev_put(hdev);
	return err;
}
1492
Johan Hedbergd7347f32014-07-04 12:37:23 +03001493/* This function requires the caller holds hdev->lock */
1494static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1495{
1496 struct hci_conn_params *p;
1497
Johan Hedbergf161dd42014-08-15 21:06:54 +03001498 list_for_each_entry(p, &hdev->le_conn_params, list) {
1499 if (p->conn) {
1500 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001501 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001502 p->conn = NULL;
1503 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001504 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001505 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001506
1507 BT_DBG("All LE pending actions cleared");
1508}
1509
/* Power down @hdev and return it to a pristine software state.
 *
 * The sequence is strictly ordered: vendor shutdown, pending work
 * cancellation, queue/work flushing, state flag cleanup, optional
 * HCI reset, and finally the transport close callback. Reordering
 * any of these steps risks use-after-free or lockdep issues.
 *
 * Returns 0 (early-out included: if HCI_UP was already clear the
 * device is considered closed).
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	/* If the device was not up, there is nothing further to tear
	 * down; just make sure no command timeout fires afterwards.
	 */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	/* Only notify mgmt of the power-off when this was not an
	 * automatic timeout shutdown of a BR/EDR controller.
	 */
	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags, keeping only HCI_RAW if it was set */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1631
/* Power off the controller identified by index @dev (ioctl path).
 *
 * Rejects the request with -EBUSY while the device is owned by a user
 * channel; otherwise cancels any scheduled auto power-off and performs
 * the full close via hci_dev_do_close().
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	/* A pending auto power-off would race with this explicit close */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1655
/* Issue an HCI Reset to @hdev and drop all transient state.
 *
 * Purges the RX/command queues, drains the workqueue before the cache
 * and connection-hash flushes (to avoid lockdep warnings), resets the
 * flow-control counters and then synchronously runs hci_reset_req.
 *
 * Returns the result of the synchronous reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Re-arm command flow control and clear per-link-type credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1689
Marcel Holtmann5c912492015-01-28 11:53:05 -08001690int hci_dev_reset(__u16 dev)
1691{
1692 struct hci_dev *hdev;
1693 int err;
1694
1695 hdev = hci_dev_get(dev);
1696 if (!hdev)
1697 return -ENODEV;
1698
1699 if (!test_bit(HCI_UP, &hdev->flags)) {
1700 err = -ENETDOWN;
1701 goto done;
1702 }
1703
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001704 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001705 err = -EBUSY;
1706 goto done;
1707 }
1708
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001709 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001710 err = -EOPNOTSUPP;
1711 goto done;
1712 }
1713
1714 err = hci_dev_do_reset(hdev);
1715
1716done:
1717 hci_dev_put(hdev);
1718 return err;
1719}
1720
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721int hci_dev_reset_stat(__u16 dev)
1722{
1723 struct hci_dev *hdev;
1724 int ret = 0;
1725
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001726 hdev = hci_dev_get(dev);
1727 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 return -ENODEV;
1729
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001730 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001731 ret = -EBUSY;
1732 goto done;
1733 }
1734
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001735 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001736 ret = -EOPNOTSUPP;
1737 goto done;
1738 }
1739
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1741
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001742done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 return ret;
1745}
1746
/* Mirror a raw HCI scan-enable value into the HCI_CONNECTABLE /
 * HCI_DISCOVERABLE flags and, when mgmt is in use, publish the
 * resulting setting changes to userspace.
 *
 * @scan is the Scan_Enable bitmask (SCAN_PAGE / SCAN_INQUIRY) that was
 * just applied through a non-mgmt path (legacy HCISETSCAN ioctl).
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* test-and-set/clear so that *_changed is true only on an
	 * actual transition, not when the flag already matched.
	 */
	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable cannot survive inquiry scan off */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
1782
/* Dispatcher for the classic per-device HCI ioctls (HCISETAUTH,
 * HCISETSCAN, HCISETPTYPE, ...).
 *
 * @cmd is the ioctl number and @arg a userspace pointer to a
 * struct hci_dev_req carrying the device index and option value.
 *
 * All commands here are restricted to powered, configured BR/EDR
 * controllers that are not claimed by a user channel; the guard
 * order determines which errno wins when several conditions hold.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU commands, dev_opt packs two __u16 values:
	 * the second half carries the MTU, the first the packet count
	 * (as laid out in memory; host-endianness dependent by design
	 * of this legacy ABI).
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1884
/* Copy the list of registered HCI devices to userspace.
 *
 * @arg points to a struct hci_dev_list_req whose leading __u16 holds
 * the caller-provided capacity (dev_num). The request size is capped
 * at two pages' worth of entries.
 *
 * Returns 0 on success, -EFAULT on copy failures, -EINVAL for a zero
 * or oversized capacity, -ENOMEM if the bounce buffer allocation fails.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Report only the number of entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1934
/* Fill a struct hci_dev_info for one device and copy it to userspace.
 *
 * @arg points to a struct hci_dev_info whose dev_id selects the device.
 * For LE-only controllers the ACL fields are reused for the LE buffer
 * parameters and the SCO fields are zeroed.
 *
 * Returns 0 on success, -EFAULT on copy failures, -ENODEV if the index
 * does not resolve.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	/* NOTE(review): unbounded strcpy — presumably di.name and
	 * hdev->name are same-sized fixed arrays; confirm in the
	 * struct definitions before touching this.
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1987
1988/* ---- Interface to HCI drivers ---- */
1989
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001990static int hci_rfkill_set_block(void *data, bool blocked)
1991{
1992 struct hci_dev *hdev = data;
1993
1994 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1995
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001996 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001997 return -EBUSY;
1998
Johan Hedberg5e130362013-09-13 08:58:17 +03001999 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002000 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002001 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2002 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002003 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002004 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002005 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002006 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002007
2008 return 0;
2009}
2010
/* rfkill operations: only the block/unblock transition is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2014
/* Deferred power-on work item (hdev->power_on).
 *
 * Opens the device, re-validates error conditions that were ignored
 * during setup (rfkill, unconfigured, missing address), schedules the
 * auto power-off when requested, and emits the appropriate mgmt index
 * event as the device leaves the SETUP or CONFIG phase.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2075
2076static void hci_power_off(struct work_struct *work)
2077{
Johan Hedberg32435532011-11-07 22:16:04 +02002078 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002079 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002080
2081 BT_DBG("%s", hdev->name);
2082
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002083 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002084}
2085
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002086static void hci_error_reset(struct work_struct *work)
2087{
2088 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2089
2090 BT_DBG("%s", hdev->name);
2091
2092 if (hdev->hw_error)
2093 hdev->hw_error(hdev, hdev->hw_error_code);
2094 else
2095 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2096 hdev->hw_error_code);
2097
2098 if (hci_dev_do_close(hdev))
2099 return;
2100
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002101 hci_dev_do_open(hdev);
2102}
2103
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002104static void hci_discov_off(struct work_struct *work)
2105{
2106 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002107
2108 hdev = container_of(work, struct hci_dev, discov_off.work);
2109
2110 BT_DBG("%s", hdev->name);
2111
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002112 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002113}
2114
Florian Grandel5d900e42015-06-18 03:16:35 +02002115static void hci_adv_timeout_expire(struct work_struct *work)
2116{
2117 struct hci_dev *hdev;
2118
2119 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2120
2121 BT_DBG("%s", hdev->name);
2122
2123 mgmt_adv_timeout_expired(hdev);
2124}
2125
Johan Hedberg35f74982014-02-18 17:14:32 +02002126void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002127{
Johan Hedberg48210022013-01-27 00:31:28 +02002128 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002129
Johan Hedberg48210022013-01-27 00:31:28 +02002130 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2131 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002132 kfree(uuid);
2133 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002134}
2135
/* Remove and free every stored BR/EDR link key.
 *
 * Entries are unlinked with list_del_rcu() and released via kfree_rcu()
 * so that concurrent lockless readers (e.g. hci_find_link_key()) can
 * finish traversing an entry before its memory is freed.
 *
 * NOTE(review): the loop deletes while iterating; list_del_rcu() keeps
 * the removed node's forward pointer intact, so the walk still reaches
 * the list head.  Presumably serialized against other writers by
 * hdev->lock -- confirm the caller's locking contract.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
2145
/* Remove and free every stored SMP Long Term Key.
 *
 * Uses RCU-safe unlink + deferred free so lockless readers such as
 * hci_find_ltk() are not disturbed.  NOTE(review): deletes while
 * iterating the RCU list; safe because list_del_rcu() preserves the
 * removed node's forward pointer.  Presumably runs under hdev->lock.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2155
/* Remove and free every stored SMP Identity Resolving Key.
 *
 * Same RCU deletion pattern as hci_smp_ltks_clear(): unlink with
 * list_del_rcu(), free with kfree_rcu(), so lockless IRK lookups can
 * complete safely.  Presumably runs under hdev->lock.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2165
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002166struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2167{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002168 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002169
Johan Hedberg0378b592014-11-19 15:22:22 +02002170 rcu_read_lock();
2171 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2172 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2173 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002174 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002175 }
2176 }
2177 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002178
2179 return NULL;
2180}
2181
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302182static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002183 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002184{
2185 /* Legacy key */
2186 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302187 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002188
2189 /* Debug keys are insecure so don't store them persistently */
2190 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302191 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002192
2193 /* Changed combination key and there's no previous one */
2194 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302195 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002196
2197 /* Security mode 3 case */
2198 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302199 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002200
Johan Hedberge3befab2014-06-01 16:33:39 +03002201 /* BR/EDR key derived using SC from an LE link */
2202 if (conn->type == LE_LINK)
2203 return true;
2204
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002205 /* Neither local nor remote side had no-bonding as requirement */
2206 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302207 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002208
2209 /* Local side had dedicated bonding as requirement */
2210 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302211 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002212
2213 /* Remote side had dedicated bonding as requirement */
2214 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302215 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002216
2217 /* If none of the above criteria match, then don't store the key
2218 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302219 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002220}
2221
Johan Hedberge804d252014-07-16 11:42:28 +03002222static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002223{
Johan Hedberge804d252014-07-16 11:42:28 +03002224 if (type == SMP_LTK)
2225 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002226
Johan Hedberge804d252014-07-16 11:42:28 +03002227 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002228}
2229
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002230struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2231 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002232{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002233 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002234
Johan Hedberg970d0f12014-11-13 14:37:47 +02002235 rcu_read_lock();
2236 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002237 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2238 continue;
2239
Johan Hedberg923e2412014-12-03 12:43:39 +02002240 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002241 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002242 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002243 }
2244 }
2245 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002246
2247 return NULL;
2248}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002249
/* Resolve a Resolvable Private Address to its Identity Resolving Key.
 *
 * Two passes over hdev->identity_resolving_keys under the RCU read
 * lock:
 *
 *  1. Fast path: return any entry whose cached ->rpa equals @rpa.
 *  2. Slow path: run the cryptographic check (smp_irk_matches()) on
 *     each stored IRK value; on a hit, cache @rpa in the entry so the
 *     next lookup for the same RPA takes the fast path.
 *
 * NOTE(review): the bacpy() below writes irk->rpa while holding only
 * rcu_read_lock(); presumably this is serialized by hdev->lock on the
 * caller side -- confirm against callers.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2273
2274struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2275 u8 addr_type)
2276{
2277 struct smp_irk *irk;
2278
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002279 /* Identity Address must be public or static random */
2280 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2281 return NULL;
2282
Johan Hedbergadae20c2014-11-13 14:37:48 +02002283 rcu_read_lock();
2284 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002285 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002286 bacmp(bdaddr, &irk->bdaddr) == 0) {
2287 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002288 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002289 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002290 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002291 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002292
2293 return NULL;
2294}
2295
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * If a key already exists for the address it is updated in place;
 * otherwise a new entry is allocated and added to hdev->link_keys
 * (RCU-safe insert).  @conn may be NULL (e.g. security mode 3 key
 * delivery without a tracked connection).
 *
 * On return, *@persistent (when non-NULL) tells the caller whether the
 * key should be stored persistently (see hci_persistent_key()).
 *
 * Returns the stored entry, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* No stored key: remember the connection's previous key
		 * type (0xff = none) for the persistence decision below.
		 */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the previous key type;
	 * everything else records the new type as-is.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2342
Johan Hedbergca9142b2014-02-19 14:57:44 +02002343struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002344 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002345 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002346{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002347 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002348 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002349
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002350 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002351 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002352 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002353 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002354 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002355 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002356 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002357 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002358 }
2359
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002360 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002361 key->bdaddr_type = addr_type;
2362 memcpy(key->val, tk, sizeof(key->val));
2363 key->authenticated = authenticated;
2364 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002365 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002366 key->enc_size = enc_size;
2367 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002368
Johan Hedbergca9142b2014-02-19 14:57:44 +02002369 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002370}
2371
Johan Hedbergca9142b2014-02-19 14:57:44 +02002372struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2373 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002374{
2375 struct smp_irk *irk;
2376
2377 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2378 if (!irk) {
2379 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2380 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002381 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002382
2383 bacpy(&irk->bdaddr, bdaddr);
2384 irk->addr_type = addr_type;
2385
Johan Hedbergadae20c2014-11-13 14:37:48 +02002386 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002387 }
2388
2389 memcpy(irk->val, val, 16);
2390 bacpy(&irk->rpa, rpa);
2391
Johan Hedbergca9142b2014-02-19 14:57:44 +02002392 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002393}
2394
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002395int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2396{
2397 struct link_key *key;
2398
2399 key = hci_find_link_key(hdev, bdaddr);
2400 if (!key)
2401 return -ENOENT;
2402
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002403 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002404
Johan Hedberg0378b592014-11-19 15:22:22 +02002405 list_del_rcu(&key->list);
2406 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002407
2408 return 0;
2409}
2410
/* Delete all Long Term Keys stored for @bdaddr/@bdaddr_type.
 *
 * More than one LTK can match (e.g. master and slave keys), so the
 * whole list is scanned and every match removed.  RCU unlink +
 * deferred free keeps concurrent lockless readers safe; NOTE(review):
 * deleting while iterating the RCU list is fine here because
 * list_del_rcu() preserves the removed node's forward pointer.
 *
 * Returns 0 when at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2429
/* Delete all Identity Resolving Keys stored for @bdaddr/@addr_type.
 *
 * RCU unlink + deferred free, same delete-while-iterating pattern as
 * hci_remove_ltk().  Silently does nothing when no entry matches.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2444
/* Check whether the remote device @bdaddr (of mgmt address @type) has
 * stored pairing material on this controller.
 *
 * BR/EDR addresses are answered by a link-key lookup.  LE addresses
 * are first converted to the HCI address type used by struct smp_ltk,
 * then resolved through the IRK table: when an IRK matches, the lookup
 * continues with the identity address instead of the given (possibly
 * resolvable private) address.  Finally the LTK list is scanned under
 * the RCU read lock.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Redirect the lookup to the identity address when an IRK is
	 * known for this device.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2480
Ville Tervo6bd32322011-02-16 16:32:41 +02002481/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002482static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002483{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002484 struct hci_dev *hdev = container_of(work, struct hci_dev,
2485 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002486
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002487 if (hdev->sent_cmd) {
2488 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2489 u16 opcode = __le16_to_cpu(sent->opcode);
2490
2491 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2492 } else {
2493 BT_ERR("%s command tx timeout", hdev->name);
2494 }
2495
Ville Tervo6bd32322011-02-16 16:32:41 +02002496 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002497 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002498}
2499
Szymon Janc2763eda2011-03-22 13:12:22 +01002500struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002501 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002502{
2503 struct oob_data *data;
2504
Johan Hedberg6928a922014-10-26 20:46:09 +01002505 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2506 if (bacmp(bdaddr, &data->bdaddr) != 0)
2507 continue;
2508 if (data->bdaddr_type != bdaddr_type)
2509 continue;
2510 return data;
2511 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002512
2513 return NULL;
2514}
2515
Johan Hedberg6928a922014-10-26 20:46:09 +01002516int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2517 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002518{
2519 struct oob_data *data;
2520
Johan Hedberg6928a922014-10-26 20:46:09 +01002521 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002522 if (!data)
2523 return -ENOENT;
2524
Johan Hedberg6928a922014-10-26 20:46:09 +01002525 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002526
2527 list_del(&data->list);
2528 kfree(data);
2529
2530 return 0;
2531}
2532
Johan Hedberg35f74982014-02-18 17:14:32 +02002533void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002534{
2535 struct oob_data *data, *n;
2536
2537 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2538 list_del(&data->list);
2539 kfree(data);
2540 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002541}
2542
/* Store (or update) remote Out-Of-Band pairing data for @bdaddr.
 *
 * Either pair of pointers may be NULL/absent; the ->present field is
 * set to a bitmask describing what was provided:
 *   0x01 - P-192 data only (hash192/rand192)
 *   0x02 - P-256 data only (hash256/rand256)
 *   0x03 - both P-192 and P-256 data
 *   0x00 - neither
 * Absent variants have their hash/rand buffers zeroed.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		/* Only P-192 data present (the 0x03 case was handled in
		 * the first branch above).
		 */
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2588
Florian Grandeld2609b32015-06-18 03:16:34 +02002589/* This function requires the caller holds hdev->lock */
2590struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2591{
2592 struct adv_info *adv_instance;
2593
2594 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2595 if (adv_instance->instance == instance)
2596 return adv_instance;
2597 }
2598
2599 return NULL;
2600}
2601
2602/* This function requires the caller holds hdev->lock */
Prasanna Karthik74b93e92015-11-18 12:38:41 +00002603struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2604{
Florian Grandeld2609b32015-06-18 03:16:34 +02002605 struct adv_info *cur_instance;
2606
2607 cur_instance = hci_find_adv_instance(hdev, instance);
2608 if (!cur_instance)
2609 return NULL;
2610
2611 if (cur_instance == list_last_entry(&hdev->adv_instances,
2612 struct adv_info, list))
2613 return list_first_entry(&hdev->adv_instances,
2614 struct adv_info, list);
2615 else
2616 return list_next_entry(cur_instance, list);
2617}
2618
2619/* This function requires the caller holds hdev->lock */
2620int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2621{
2622 struct adv_info *adv_instance;
2623
2624 adv_instance = hci_find_adv_instance(hdev, instance);
2625 if (!adv_instance)
2626 return -ENOENT;
2627
2628 BT_DBG("%s removing %dMR", hdev->name, instance);
2629
Florian Grandel5d900e42015-06-18 03:16:35 +02002630 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2631 cancel_delayed_work(&hdev->adv_instance_expire);
2632 hdev->adv_instance_timeout = 0;
2633 }
2634
Florian Grandeld2609b32015-06-18 03:16:34 +02002635 list_del(&adv_instance->list);
2636 kfree(adv_instance);
2637
2638 hdev->adv_instance_cnt--;
2639
2640 return 0;
2641}
2642
2643/* This function requires the caller holds hdev->lock */
2644void hci_adv_instances_clear(struct hci_dev *hdev)
2645{
2646 struct adv_info *adv_instance, *n;
2647
Florian Grandel5d900e42015-06-18 03:16:35 +02002648 if (hdev->adv_instance_timeout) {
2649 cancel_delayed_work(&hdev->adv_instance_expire);
2650 hdev->adv_instance_timeout = 0;
2651 }
2652
Florian Grandeld2609b32015-06-18 03:16:34 +02002653 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2654 list_del(&adv_instance->list);
2655 kfree(adv_instance);
2656 }
2657
2658 hdev->adv_instance_cnt = 0;
2659}
2660
2661/* This function requires the caller holds hdev->lock */
2662int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2663 u16 adv_data_len, u8 *adv_data,
2664 u16 scan_rsp_len, u8 *scan_rsp_data,
2665 u16 timeout, u16 duration)
2666{
2667 struct adv_info *adv_instance;
2668
2669 adv_instance = hci_find_adv_instance(hdev, instance);
2670 if (adv_instance) {
2671 memset(adv_instance->adv_data, 0,
2672 sizeof(adv_instance->adv_data));
2673 memset(adv_instance->scan_rsp_data, 0,
2674 sizeof(adv_instance->scan_rsp_data));
2675 } else {
2676 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2677 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2678 return -EOVERFLOW;
2679
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002680 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002681 if (!adv_instance)
2682 return -ENOMEM;
2683
Florian Grandelfffd38b2015-06-18 03:16:47 +02002684 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002685 adv_instance->instance = instance;
2686 list_add(&adv_instance->list, &hdev->adv_instances);
2687 hdev->adv_instance_cnt++;
2688 }
2689
2690 adv_instance->flags = flags;
2691 adv_instance->adv_data_len = adv_data_len;
2692 adv_instance->scan_rsp_len = scan_rsp_len;
2693
2694 if (adv_data_len)
2695 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2696
2697 if (scan_rsp_len)
2698 memcpy(adv_instance->scan_rsp_data,
2699 scan_rsp_data, scan_rsp_len);
2700
2701 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002702 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002703
2704 if (duration == 0)
2705 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2706 else
2707 adv_instance->duration = duration;
2708
2709 BT_DBG("%s for %dMR", hdev->name, instance);
2710
2711 return 0;
2712}
2713
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002714struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002715 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002716{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002717 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002718
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002719 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002720 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002721 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002722 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002723
2724 return NULL;
2725}
2726
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002727void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002728{
2729 struct list_head *p, *n;
2730
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002731 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002732 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002733
2734 list_del(p);
2735 kfree(b);
2736 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002737}
2738
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002739int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002740{
2741 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002742
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002743 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002744 return -EBADF;
2745
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002746 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002747 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002748
Johan Hedberg27f70f32014-07-21 10:50:06 +03002749 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002750 if (!entry)
2751 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002752
2753 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002754 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002755
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002756 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002757
2758 return 0;
2759}
2760
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002761int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002762{
2763 struct bdaddr_list *entry;
2764
Johan Hedberg35f74982014-02-18 17:14:32 +02002765 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002766 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002767 return 0;
2768 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002769
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002770 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002771 if (!entry)
2772 return -ENOENT;
2773
2774 list_del(&entry->list);
2775 kfree(entry);
2776
2777 return 0;
2778}
2779
Andre Guedes15819a72014-02-03 13:56:18 -03002780/* This function requires the caller holds hdev->lock */
2781struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2782 bdaddr_t *addr, u8 addr_type)
2783{
2784 struct hci_conn_params *params;
2785
2786 list_for_each_entry(params, &hdev->le_conn_params, list) {
2787 if (bacmp(&params->addr, addr) == 0 &&
2788 params->addr_type == addr_type) {
2789 return params;
2790 }
2791 }
2792
2793 return NULL;
2794}
2795
2796/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002797struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2798 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002799{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002800 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002801
Johan Hedberg501f8822014-07-04 12:37:26 +03002802 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002803 if (bacmp(&param->addr, addr) == 0 &&
2804 param->addr_type == addr_type)
2805 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002806 }
2807
2808 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002809}
2810
/* Look up, or create and register, the LE connection parameter
 * entry for the given address/type. Newly created entries are
 * populated from the controller-wide defaults stored in hdev.
 *
 * Returns the (existing or new) entry, or NULL on allocation
 * failure. This function requires the caller holds hdev->lock.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* Re-use an existing entry for this address/type if present. */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	/* Link into the main list; the action list head starts empty
	 * so list_del() on it is always safe later.
	 */
	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	/* Seed per-connection values from the controller defaults. */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
2843
/* Unlink and free one connection parameter entry. If a connection
 * is still attached, release the reference held through it first
 * (drop, then put). NOTE(review): callers are expected to hold
 * hdev->lock — confirm against call sites.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	/* Remove from both the pend_le action list and the main
	 * le_conn_params list before freeing.
	 */
	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2855
Andre Guedes15819a72014-02-03 13:56:18 -03002856/* This function requires the caller holds hdev->lock */
2857void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2858{
2859 struct hci_conn_params *params;
2860
2861 params = hci_conn_params_lookup(hdev, addr, addr_type);
2862 if (!params)
2863 return;
2864
Johan Hedbergf6c63242014-08-15 21:06:59 +03002865 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002866
Johan Hedberg95305ba2014-07-04 12:37:21 +03002867 hci_update_background_scan(hdev);
2868
Andre Guedes15819a72014-02-03 13:56:18 -03002869 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2870}
2871
/* Drop all LE connection parameter entries whose auto_connect state
 * is HCI_AUTO_CONN_DISABLED, except those with a pending explicit
 * connect, which are converted to one-shot (EXPLICIT) entries.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		/* NOTE(review): entries are freed directly here rather
		 * than via hci_conn_params_free() — presumably disabled
		 * entries have no conn and sit on no action list; verify.
		 */
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2895
2896/* This function requires the caller holds hdev->lock */
Johan Hedberg030e7f82015-11-10 09:44:53 +02002897static void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002898{
2899 struct hci_conn_params *params, *tmp;
2900
Johan Hedbergf6c63242014-08-15 21:06:59 +03002901 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2902 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002903
2904 BT_DBG("All LE connection parameters were removed");
2905}
2906
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead
 * (HCI_FORCE_STATIC_ADDR flag).
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	/* Static random address is used when: forced via flag, no public
	 * address exists, or BR/EDR is disabled and a static address is
	 * configured.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
2934
/* Allocate and initialize a new HCI device structure.
 *
 * Sets conservative protocol defaults, initializes every embedded
 * list, lock, work item and queue, and registers the device with
 * sysfs. Returns NULL on allocation failure. The returned device
 * is freed via hci_free_dev() / the device release callback.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR packet/link defaults. */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning, connection and data-length defaults. */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* All list heads must be initialized before any clear/lookup
	 * helper may run against them.
	 */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	/* Deferred-work handlers for RX/TX/command processing and
	 * power management.
	 */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3024
3025/* Free HCI device */
3026void hci_free_dev(struct hci_dev *hdev)
3027{
David Herrmann9be0dab2012-04-22 14:39:57 +02003028 /* will free via device release */
3029 put_device(&hdev->dev);
3030}
3031EXPORT_SYMBOL(hci_free_dev);
3032
/* Register HCI device with the core.
 *
 * Allocates an index, creates the device's workqueues, sysfs entry,
 * debugfs directory and rfkill hook, adds it to the global device
 * list and schedules the initial power-on. Returns the assigned
 * index on success or a negative errno; on failure all partially
 * acquired resources are released via the goto cleanup chain.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must provide at least these three callbacks. */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Single-threaded, high-priority workqueues for RX/TX/cmd
	 * processing and for serialized request handling.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: failure leaves
	 * hdev->rfkill NULL and registration continues.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3136
3137/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003138void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003139{
Marcel Holtmann2d7cc192015-04-04 21:59:27 -07003140 int id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003141
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003142 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003144 hci_dev_set_flag(hdev, HCI_UNREGISTER);
Johan Hovold94324962012-03-15 14:48:41 +01003145
Sasha Levin3df92b32012-05-27 22:36:56 +02003146 id = hdev->id;
3147
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003148 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003150 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151
3152 hci_dev_do_close(hdev);
3153
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003154 cancel_work_sync(&hdev->power_on);
3155
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003156 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003157 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3158 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003159 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003160 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003161 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003162 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003163
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003164 /* mgmt_index_removed should take care of emptying the
3165 * pending list */
3166 BUG_ON(!list_empty(&hdev->mgmt_pending));
3167
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003168 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003170 if (hdev->rfkill) {
3171 rfkill_unregister(hdev->rfkill);
3172 rfkill_destroy(hdev->rfkill);
3173 }
3174
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003175 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003176
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003177 debugfs_remove_recursive(hdev->debugfs);
3178
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003179 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003180 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003181
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003182 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003183 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003184 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003185 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003186 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003187 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003188 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003189 hci_remote_oob_data_clear(hdev);
Florian Grandeld2609b32015-06-18 03:16:34 +02003190 hci_adv_instances_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003191 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003192 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003193 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003194 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003195
David Herrmanndc946bd2012-01-07 15:47:24 +01003196 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003197
3198 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199}
3200EXPORT_SYMBOL(hci_unregister_dev);
3201
3202/* Suspend HCI device */
3203int hci_suspend_dev(struct hci_dev *hdev)
3204{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003205 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206 return 0;
3207}
3208EXPORT_SYMBOL(hci_suspend_dev);
3209
3210/* Resume HCI device */
3211int hci_resume_dev(struct hci_dev *hdev)
3212{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003213 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214 return 0;
3215}
3216EXPORT_SYMBOL(hci_resume_dev);
3217
/* Reset HCI device.
 *
 * Injects a synthetic HCI Hardware Error event (code 0x01) into the
 * receive path, so the upper stack performs its normal error/reset
 * handling. Returns the result of hci_recv_frame() or -ENOMEM.
 */
int hci_reset_dev(struct hci_dev *hdev)
{
	/* Event header: event code, parameter length 1, error code 0x00. */
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
3235
/* Receive frame from HCI drivers.
 *
 * Validates device state and packet type, marks the skb as incoming,
 * timestamps it and queues it for deferred processing on the device
 * workqueue. Consumes the skb in all cases. Returns 0 on success,
 * -ENXIO when the device is not up/initializing, -EINVAL for an
 * unsupported packet type.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Only event, ACL and SCO packets are accepted from drivers. */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer actual processing to hci_rx_work. */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
3264
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003265/* Receive diagnostic message from HCI drivers */
3266int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3267{
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003268 /* Mark as diagnostic packet */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003269 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003270
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003271 /* Time stamp */
3272 __net_timestamp(skb);
3273
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003274 skb_queue_tail(&hdev->rx_q, skb);
3275 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003276
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003277 return 0;
3278}
3279EXPORT_SYMBOL(hci_recv_diag);
3280
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281/* ---- Interface to upper protocols ---- */
3282
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283int hci_register_cb(struct hci_cb *cb)
3284{
3285 BT_DBG("%p name %s", cb, cb->name);
3286
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003287 mutex_lock(&hci_cb_list_lock);
Johan Hedberg00629e02015-02-18 14:53:54 +02003288 list_add_tail(&cb->list, &hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003289 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290
3291 return 0;
3292}
3293EXPORT_SYMBOL(hci_register_cb);
3294
3295int hci_unregister_cb(struct hci_cb *cb)
3296{
3297 BT_DBG("%p name %s", cb, cb->name);
3298
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003299 mutex_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300 list_del(&cb->list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003301 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302
3303 return 0;
3304}
3305EXPORT_SYMBOL(hci_unregister_cb);
3306
/* Hand one outbound packet to the driver.
 *
 * Timestamps the skb, mirrors it to the monitor interface (and to
 * promiscuous sockets), then passes it to hdev->send(). The skb is
 * consumed: either by the driver or by kfree_skb() on failure or
 * when the device is not running.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop silently if the transport is not running anymore. */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
3339
/* Send HCI command.
 *
 * Builds a command skb from opcode/plen/param and queues it on the
 * command queue for the command work item to transmit. Returns 0 on
 * success or -ENOMEM if the skb could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364
3365/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003366void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367{
3368 struct hci_command_hdr *hdr;
3369
3370 if (!hdev->sent_cmd)
3371 return NULL;
3372
3373 hdr = (void *) hdev->sent_cmd->data;
3374
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003375 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 return NULL;
3377
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003378 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379
3380 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3381}
3382
/* Send HCI command and wait for command complete event.
 *
 * Serialized against other synchronous requests via the request
 * lock. Returns the event skb, or ERR_PTR(-ENETDOWN) when the
 * device is not up (other errors propagate from __hci_cmd_sync()).
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
3401
/* Prepend an ACL data header (handle+flags, data length) to the skb.
 * The payload length is captured before skb_push() grows the skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length, before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3414
/* Queue an ACL packet (possibly fragmented) on the given queue.
 *
 * Each fragment gets its own ACL header; the first carries the
 * caller's flags, subsequent ones are marked ACL_CONT. For AMP
 * controllers the channel handle is used instead of the connection
 * handle.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; the frag_list
	 * members are queued individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain from the head skb. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All fragments after the first are continuations. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3476
3477void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3478{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003479 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003480
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003481 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003482
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003483 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003485 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487
3488/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003489void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490{
3491 struct hci_dev *hdev = conn->hdev;
3492 struct hci_sco_hdr hdr;
3493
3494 BT_DBG("%s len %d", hdev->name, skb->len);
3495
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003496 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497 hdr.dlen = skb->len;
3498
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003499 skb_push(skb, HCI_SCO_HDR_SIZE);
3500 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003501 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003503 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003504
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003506 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508
3509/* ---- HCI TX task (outgoing data) ---- */
3510
3511/* HCI Connection scheduler */
/* Select the connection of the given link type that should transmit
 * next: the connected one with queued data and the fewest packets in
 * flight.  *quote receives that connection's fair share of the free
 * controller buffers (at least 1), or 0 when nothing is eligible.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-recently-served candidate */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool matching the winner's link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3571
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003572static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573{
3574 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003575 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576
Ville Tervobae1f5d92011-02-10 22:38:53 -03003577 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003579 rcu_read_lock();
3580
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003582 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003583 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003584 BT_ERR("%s killing stalled connection %pMR",
3585 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003586 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587 }
3588 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003589
3590 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591}
3592
/* Pick the channel (across all connections of the given link type)
 * that should transmit next.  Among the channels whose head-of-queue
 * skb has the highest priority seen, the one on the connection with
 * the fewest unacked packets wins.  On success *quote is set to that
 * channel's fair share of the free controller buffers (at least 1);
 * on NULL return *quote is left untouched.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New highest priority: restart the
				 * fairness bookkeeping at this level.
				 */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Divide the matching buffer pool across the contenders */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3674
/* Anti-starvation pass: reset the per-round tx counter of channels
 * that did send, and promote the head skb of channels that were
 * skipped to priority HCI_PRIO_MAX - 1 so they win the next
 * hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: clear counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3724
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003725static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3726{
3727 /* Calculate count of blocks used by this packet */
3728 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3729}
3730
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003731static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003733 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734 /* ACL tx timeout must be longer than maximum
3735 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003736 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003737 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003738 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003740}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741
/* Packet-based ACL scheduler: while the controller has free ACL
 * buffers, serve the best-ranked channel (see hci_chan_sent()) up to
 * its fair quota, stopping early if a lower-priority skb reaches the
 * head of the queue.  Afterwards, promote starved channels so they
 * get a turn in the next round.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link when no buffer came back for too long */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3779
/* Block-based ACL scheduler, for controllers whose flow control
 * accounts buffer usage in data blocks rather than whole packets.
 * AMP controllers schedule AMP links, everything else ACL links.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	/* Detect a stalled link when no buffer came back for too long */
	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* NOTE(review): skb was already dequeued; the
			 * early return below appears to drop it without
			 * kfree_skb() - confirm this is intended.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3833
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003834static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003835{
3836 BT_DBG("%s", hdev->name);
3837
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003838 /* No ACL link over BR/EDR controller */
3839 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3840 return;
3841
3842 /* No AMP link over AMP controller */
3843 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003844 return;
3845
3846 switch (hdev->flow_ctl_mode) {
3847 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3848 hci_sched_acl_pkt(hdev);
3849 break;
3850
3851 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3852 hci_sched_acl_blk(hdev);
3853 break;
3854 }
3855}
3856
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003858static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859{
3860 struct hci_conn *conn;
3861 struct sk_buff *skb;
3862 int quote;
3863
3864 BT_DBG("%s", hdev->name);
3865
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003866 if (!hci_conn_num(hdev, SCO_LINK))
3867 return;
3868
Linus Torvalds1da177e2005-04-16 15:20:36 -07003869 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3870 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3871 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003872 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873
3874 conn->sent++;
3875 if (conn->sent == ~0)
3876 conn->sent = 0;
3877 }
3878 }
3879}
3880
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003881static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003882{
3883 struct hci_conn *conn;
3884 struct sk_buff *skb;
3885 int quote;
3886
3887 BT_DBG("%s", hdev->name);
3888
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003889 if (!hci_conn_num(hdev, ESCO_LINK))
3890 return;
3891
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003892 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3893 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003894 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3895 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003896 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003897
3898 conn->sent++;
3899 if (conn->sent == ~0)
3900 conn->sent = 0;
3901 }
3902 }
3903}
3904
/* LE scheduler: works like hci_sched_acl_pkt() but draws from the
 * dedicated LE buffer pool when the controller has one (le_pkts != 0)
 * and falls back to the shared ACL pool otherwise.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the LE pool if present, else share the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3955
/* TX worker: run the per-link-type schedulers (unless the device is
 * claimed by a user channel, which bypasses in-kernel scheduling) and
 * then flush the raw packet queue straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
3976
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003977/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978
3979/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003980static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981{
3982 struct hci_acl_hdr *hdr = (void *) skb->data;
3983 struct hci_conn *conn;
3984 __u16 handle, flags;
3985
3986 skb_pull(skb, HCI_ACL_HDR_SIZE);
3987
3988 handle = __le16_to_cpu(hdr->handle);
3989 flags = hci_flags(handle);
3990 handle = hci_handle(handle);
3991
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003992 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003993 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994
3995 hdev->stat.acl_rx++;
3996
3997 hci_dev_lock(hdev);
3998 conn = hci_conn_hash_lookup_handle(hdev, handle);
3999 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004000
Linus Torvalds1da177e2005-04-16 15:20:36 -07004001 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004002 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004003
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004005 l2cap_recv_acldata(conn, skb, flags);
4006 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004008 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004009 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010 }
4011
4012 kfree_skb(skb);
4013}
4014
4015/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004016static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017{
4018 struct hci_sco_hdr *hdr = (void *) skb->data;
4019 struct hci_conn *conn;
4020 __u16 handle;
4021
4022 skb_pull(skb, HCI_SCO_HDR_SIZE);
4023
4024 handle = __le16_to_cpu(hdr->handle);
4025
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004026 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027
4028 hdev->stat.sco_rx++;
4029
4030 hci_dev_lock(hdev);
4031 conn = hci_conn_hash_lookup_handle(hdev, handle);
4032 hci_dev_unlock(hdev);
4033
4034 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004036 sco_recv_scodata(conn, skb);
4037 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004039 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004040 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041 }
4042
4043 kfree_skb(skb);
4044}
4045
Johan Hedberg9238f362013-03-05 20:37:48 +02004046static bool hci_req_is_complete(struct hci_dev *hdev)
4047{
4048 struct sk_buff *skb;
4049
4050 skb = skb_peek(&hdev->cmd_q);
4051 if (!skb)
4052 return true;
4053
Johan Hedberg44d27132015-11-05 09:31:40 +02004054 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
Johan Hedberg9238f362013-03-05 20:37:48 +02004055}
4056
Johan Hedberg42c6b122013-03-05 20:37:49 +02004057static void hci_resend_last(struct hci_dev *hdev)
4058{
4059 struct hci_command_hdr *sent;
4060 struct sk_buff *skb;
4061 u16 opcode;
4062
4063 if (!hdev->sent_cmd)
4064 return;
4065
4066 sent = (void *) hdev->sent_cmd->data;
4067 opcode = __le16_to_cpu(sent->opcode);
4068 if (opcode == HCI_OP_RESET)
4069 return;
4070
4071 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4072 if (!skb)
4073 return;
4074
4075 skb_queue_head(&hdev->cmd_q, skb);
4076 queue_work(hdev->workqueue, &hdev->cmd_work);
4077}
4078
/* Called on command status/complete for @opcode: decide whether the
 * request that command belonged to is now finished and, if so, hand
 * its completion callback back through @req_complete or
 * @req_complete_skb.  On a non-zero @status the remaining queued
 * commands of the same request are purged.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A command flagged HCI_REQ_START begins the next
		 * request; put it back and stop purging.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->hci.req_complete;
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4138
/* RX worker: drain hdev->rx_q.  Every packet is mirrored to the
 * monitor channel and, in promiscuous mode, to raw sockets.  In user
 * channel mode the kernel stack does not process packets at all, and
 * while HCI_INIT is set data packets are discarded; everything else
 * is dispatched by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4193
/* Command worker: when the controller can accept a command (cmd_cnt
 * non-zero), take the next one off cmd_q, keep a clone in
 * hdev->sent_cmd for completion matching and send it.  The command
 * timer is armed to catch a missing completion, except while a reset
 * is pending.  If cloning fails the command is requeued and the
 * worker rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command's clone */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}