blob: 029d7798cffabdd975d70324adf0ecef136ab066 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
43
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */
60
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070061static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
62 size_t count, loff_t *ppos)
63{
64 struct hci_dev *hdev = file->private_data;
65 char buf[3];
66
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -070067 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070068 buf[1] = '\n';
69 buf[2] = '\0';
70 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
71}
72
73static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
74 size_t count, loff_t *ppos)
75{
76 struct hci_dev *hdev = file->private_data;
77 struct sk_buff *skb;
78 char buf[32];
79 size_t buf_size = min(count, (sizeof(buf)-1));
80 bool enable;
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070081
82 if (!test_bit(HCI_UP, &hdev->flags))
83 return -ENETDOWN;
84
85 if (copy_from_user(buf, user_buf, buf_size))
86 return -EFAULT;
87
88 buf[buf_size] = '\0';
89 if (strtobool(buf, &enable))
90 return -EINVAL;
91
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -070092 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070093 return -EALREADY;
94
Johan Hedbergb5044302015-11-10 09:44:55 +020095 hci_req_sync_lock(hdev);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070096 if (enable)
97 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
98 HCI_CMD_TIMEOUT);
99 else
100 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
101 HCI_CMD_TIMEOUT);
Johan Hedbergb5044302015-11-10 09:44:55 +0200102 hci_req_sync_unlock(hdev);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700103
104 if (IS_ERR(skb))
105 return PTR_ERR(skb);
106
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700107 kfree_skb(skb);
108
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -0700109 hci_dev_change_flag(hdev, HCI_DUT_MODE);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700110
111 return count;
112}
113
114static const struct file_operations dut_mode_fops = {
115 .open = simple_open,
116 .read = dut_mode_read,
117 .write = dut_mode_write,
118 .llseek = default_llseek,
119};
120
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200121static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
122 size_t count, loff_t *ppos)
123{
124 struct hci_dev *hdev = file->private_data;
125 char buf[3];
126
127 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
128 buf[1] = '\n';
129 buf[2] = '\0';
130 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
131}
132
133static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
134 size_t count, loff_t *ppos)
135{
136 struct hci_dev *hdev = file->private_data;
137 char buf[32];
138 size_t buf_size = min(count, (sizeof(buf)-1));
139 bool enable;
140 int err;
141
142 if (copy_from_user(buf, user_buf, buf_size))
143 return -EFAULT;
144
145 buf[buf_size] = '\0';
146 if (strtobool(buf, &enable))
147 return -EINVAL;
148
Marcel Holtmann7e995b92015-10-17 16:00:26 +0200149 /* When the diagnostic flags are not persistent and the transport
150 * is not active, then there is no need for the vendor callback.
151 *
152 * Instead just store the desired value. If needed the setting
153 * will be programmed when the controller gets powered on.
154 */
155 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
156 !test_bit(HCI_RUNNING, &hdev->flags))
157 goto done;
158
Johan Hedbergb5044302015-11-10 09:44:55 +0200159 hci_req_sync_lock(hdev);
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200160 err = hdev->set_diag(hdev, enable);
Johan Hedbergb5044302015-11-10 09:44:55 +0200161 hci_req_sync_unlock(hdev);
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200162
163 if (err < 0)
164 return err;
165
Marcel Holtmann7e995b92015-10-17 16:00:26 +0200166done:
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200167 if (enable)
168 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
169 else
170 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
171
172 return count;
173}
174
175static const struct file_operations vendor_diag_fops = {
176 .open = simple_open,
177 .read = vendor_diag_read,
178 .write = vendor_diag_write,
179 .llseek = default_llseek,
180};
181
Marcel Holtmannf640ee92015-10-08 12:35:42 +0200182static void hci_debugfs_create_basic(struct hci_dev *hdev)
183{
184 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
185 &dut_mode_fops);
186
187 if (hdev->set_diag)
188 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
189 &vendor_diag_fops);
190}
191
Johan Hedberg42c6b122013-03-05 20:37:49 +0200192static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200194 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195
196 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200197 set_bit(HCI_RESET, &req->hdev->flags);
198 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199}
200
Johan Hedberg42c6b122013-03-05 20:37:49 +0200201static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200203 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200204
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200206 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200208 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200209 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200210
211 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200212 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700213}
214
Johan Hedberg0af801b2015-02-17 15:05:21 +0200215static void amp_init1(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200216{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200217 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200218
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200219 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200220 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300221
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700222 /* Read Local Supported Commands */
223 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
224
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300225 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200226 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300227
228 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200229 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700230
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700231 /* Read Flow Control Mode */
232 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
233
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700234 /* Read Location Data */
235 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200236}
237
Johan Hedberg0af801b2015-02-17 15:05:21 +0200238static void amp_init2(struct hci_request *req)
239{
240 /* Read Local Supported Features. Not all AMP controllers
241 * support this so it's placed conditionally in the second
242 * stage init.
243 */
244 if (req->hdev->commands[14] & 0x20)
245 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
246}
247
Johan Hedberg42c6b122013-03-05 20:37:49 +0200248static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200249{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200250 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200251
252 BT_DBG("%s %ld", hdev->name, opt);
253
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300254 /* Reset */
255 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200256 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300257
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200258 switch (hdev->dev_type) {
259 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200260 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200261 break;
262
263 case HCI_AMP:
Johan Hedberg0af801b2015-02-17 15:05:21 +0200264 amp_init1(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200265 break;
266
267 default:
268 BT_ERR("Unknown device type %d", hdev->dev_type);
269 break;
270 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200271}
272
Johan Hedberg42c6b122013-03-05 20:37:49 +0200273static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200274{
Johan Hedberg2177bab2013-03-05 20:37:43 +0200275 __le16 param;
276 __u8 flt_type;
277
278 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200279 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200280
281 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200282 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200283
284 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200285 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200286
287 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200288 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200289
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700290 /* Read Number of Supported IAC */
291 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
292
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700293 /* Read Current IAC LAP */
294 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
295
Johan Hedberg2177bab2013-03-05 20:37:43 +0200296 /* Clear Event Filters */
297 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200298 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200299
300 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700301 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200302 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200303}
304
Johan Hedberg42c6b122013-03-05 20:37:49 +0200305static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200306{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300307 struct hci_dev *hdev = req->hdev;
308
Johan Hedberg2177bab2013-03-05 20:37:43 +0200309 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200310 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200311
312 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200313 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200314
Marcel Holtmann747d3f02014-02-27 20:37:29 -0800315 /* Read LE Supported States */
316 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
317
Johan Hedbergc73eee92013-04-19 18:35:21 +0300318 /* LE-only controllers have LE implicitly enabled */
319 if (!lmp_bredr_capable(hdev))
Marcel Holtmanna1536da2015-03-13 02:11:01 -0700320 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200321}
322
Johan Hedberg42c6b122013-03-05 20:37:49 +0200323static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200324{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200325 struct hci_dev *hdev = req->hdev;
326
Johan Hedberg2177bab2013-03-05 20:37:43 +0200327 /* The second byte is 0xff instead of 0x9f (two reserved bits
328 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
329 * command otherwise.
330 */
331 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
332
333 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
334 * any event mask for pre 1.2 devices.
335 */
336 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
337 return;
338
339 if (lmp_bredr_capable(hdev)) {
340 events[4] |= 0x01; /* Flow Specification Complete */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700341 } else {
342 /* Use a different default for LE-only devices */
343 memset(events, 0, sizeof(events));
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700344 events[1] |= 0x20; /* Command Complete */
345 events[1] |= 0x40; /* Command Status */
346 events[1] |= 0x80; /* Hardware Error */
Marcel Holtmann5c3d3b42015-11-04 07:17:23 +0100347
348 /* If the controller supports the Disconnect command, enable
349 * the corresponding event. In addition enable packet flow
350 * control related events.
351 */
352 if (hdev->commands[0] & 0x20) {
353 events[0] |= 0x10; /* Disconnection Complete */
354 events[2] |= 0x04; /* Number of Completed Packets */
355 events[3] |= 0x02; /* Data Buffer Overflow */
356 }
357
358 /* If the controller supports the Read Remote Version
359 * Information command, enable the corresponding event.
360 */
361 if (hdev->commands[2] & 0x80)
362 events[1] |= 0x08; /* Read Remote Version Information
363 * Complete
364 */
Marcel Holtmann0da71f12014-07-12 23:36:16 +0200365
366 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
367 events[0] |= 0x80; /* Encryption Change */
368 events[5] |= 0x80; /* Encryption Key Refresh Complete */
369 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200370 }
371
Marcel Holtmann9fe759c2015-11-01 09:45:22 +0100372 if (lmp_inq_rssi_capable(hdev) ||
373 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
Johan Hedberg2177bab2013-03-05 20:37:43 +0200374 events[4] |= 0x02; /* Inquiry Result with RSSI */
375
Marcel Holtmann70f56aa2015-11-01 09:39:49 +0100376 if (lmp_ext_feat_capable(hdev))
377 events[4] |= 0x04; /* Read Remote Extended Features Complete */
378
379 if (lmp_esco_capable(hdev)) {
380 events[5] |= 0x08; /* Synchronous Connection Complete */
381 events[5] |= 0x10; /* Synchronous Connection Changed */
382 }
383
Johan Hedberg2177bab2013-03-05 20:37:43 +0200384 if (lmp_sniffsubr_capable(hdev))
385 events[5] |= 0x20; /* Sniff Subrating */
386
387 if (lmp_pause_enc_capable(hdev))
388 events[5] |= 0x80; /* Encryption Key Refresh Complete */
389
390 if (lmp_ext_inq_capable(hdev))
391 events[5] |= 0x40; /* Extended Inquiry Result */
392
393 if (lmp_no_flush_capable(hdev))
394 events[7] |= 0x01; /* Enhanced Flush Complete */
395
396 if (lmp_lsto_capable(hdev))
397 events[6] |= 0x80; /* Link Supervision Timeout Changed */
398
399 if (lmp_ssp_capable(hdev)) {
400 events[6] |= 0x01; /* IO Capability Request */
401 events[6] |= 0x02; /* IO Capability Response */
402 events[6] |= 0x04; /* User Confirmation Request */
403 events[6] |= 0x08; /* User Passkey Request */
404 events[6] |= 0x10; /* Remote OOB Data Request */
405 events[6] |= 0x20; /* Simple Pairing Complete */
406 events[7] |= 0x04; /* User Passkey Notification */
407 events[7] |= 0x08; /* Keypress Notification */
408 events[7] |= 0x10; /* Remote Host Supported
409 * Features Notification
410 */
411 }
412
413 if (lmp_le_capable(hdev))
414 events[7] |= 0x20; /* LE Meta-Event */
415
Johan Hedberg42c6b122013-03-05 20:37:49 +0200416 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200417}
418
Johan Hedberg42c6b122013-03-05 20:37:49 +0200419static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200420{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200421 struct hci_dev *hdev = req->hdev;
422
Johan Hedberg0af801b2015-02-17 15:05:21 +0200423 if (hdev->dev_type == HCI_AMP)
424 return amp_init2(req);
425
Johan Hedberg2177bab2013-03-05 20:37:43 +0200426 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200427 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +0300428 else
Marcel Holtmanna358dc12015-03-13 02:11:02 -0700429 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200430
431 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200432 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200433
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100434 /* All Bluetooth 1.2 and later controllers should support the
435 * HCI command for reading the local supported commands.
436 *
437 * Unfortunately some controllers indicate Bluetooth 1.2 support,
438 * but do not have support for this command. If that is the case,
439 * the driver can quirk the behavior and skip reading the local
440 * supported commands.
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300441 */
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100442 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
443 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200444 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200445
446 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -0700447 /* When SSP is available, then the host features page
448 * should also be available as well. However some
449 * controllers list the max_page as 0 as long as SSP
450 * has not been enabled. To achieve proper debugging
451 * output, force the minimum max_page to 1 at least.
452 */
453 hdev->max_page = 0x01;
454
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700455 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200456 u8 mode = 0x01;
Marcel Holtmann574ea3c2015-01-22 11:15:20 -0800457
Johan Hedberg42c6b122013-03-05 20:37:49 +0200458 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
459 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200460 } else {
461 struct hci_cp_write_eir cp;
462
463 memset(hdev->eir, 0, sizeof(hdev->eir));
464 memset(&cp, 0, sizeof(cp));
465
Johan Hedberg42c6b122013-03-05 20:37:49 +0200466 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200467 }
468 }
469
Marcel Holtmann043ec9b2015-01-02 23:35:19 -0800470 if (lmp_inq_rssi_capable(hdev) ||
471 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
Marcel Holtmann04422da2015-01-02 23:35:18 -0800472 u8 mode;
473
474 /* If Extended Inquiry Result events are supported, then
475 * they are clearly preferred over Inquiry Result with RSSI
476 * events.
477 */
478 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
479
480 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
481 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200482
483 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200484 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200485
486 if (lmp_ext_feat_capable(hdev)) {
487 struct hci_cp_read_local_ext_features cp;
488
489 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200490 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
491 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200492 }
493
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700494 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200495 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200496 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
497 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200498 }
499}
500
Johan Hedberg42c6b122013-03-05 20:37:49 +0200501static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200502{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200503 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200504 struct hci_cp_write_def_link_policy cp;
505 u16 link_policy = 0;
506
507 if (lmp_rswitch_capable(hdev))
508 link_policy |= HCI_LP_RSWITCH;
509 if (lmp_hold_capable(hdev))
510 link_policy |= HCI_LP_HOLD;
511 if (lmp_sniff_capable(hdev))
512 link_policy |= HCI_LP_SNIFF;
513 if (lmp_park_capable(hdev))
514 link_policy |= HCI_LP_PARK;
515
516 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200517 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200518}
519
Johan Hedberg42c6b122013-03-05 20:37:49 +0200520static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200521{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200522 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200523 struct hci_cp_write_le_host_supported cp;
524
Johan Hedbergc73eee92013-04-19 18:35:21 +0300525 /* LE-only devices do not support explicit enablement */
526 if (!lmp_bredr_capable(hdev))
527 return;
528
Johan Hedberg2177bab2013-03-05 20:37:43 +0200529 memset(&cp, 0, sizeof(cp));
530
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700531 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200532 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +0200533 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200534 }
535
536 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200537 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
538 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200539}
540
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300541static void hci_set_event_mask_page_2(struct hci_request *req)
542{
543 struct hci_dev *hdev = req->hdev;
544 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
545
546 /* If Connectionless Slave Broadcast master role is supported
547 * enable all necessary events for it.
548 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800549 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300550 events[1] |= 0x40; /* Triggered Clock Capture */
551 events[1] |= 0x80; /* Synchronization Train Complete */
552 events[2] |= 0x10; /* Slave Page Response Timeout */
553 events[2] |= 0x20; /* CSB Channel Map Change */
554 }
555
556 /* If Connectionless Slave Broadcast slave role is supported
557 * enable all necessary events for it.
558 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800559 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300560 events[2] |= 0x01; /* Synchronization Train Received */
561 events[2] |= 0x02; /* CSB Receive */
562 events[2] |= 0x04; /* CSB Timeout */
563 events[2] |= 0x08; /* Truncated Page Complete */
564 }
565
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800566 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +0200567 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800568 events[2] |= 0x80;
569
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300570 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
571}
572
Johan Hedberg42c6b122013-03-05 20:37:49 +0200573static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200574{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200575 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300576 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200577
Marcel Holtmann0da71f12014-07-12 23:36:16 +0200578 hci_setup_event_mask(req);
579
Johan Hedberge81be902015-08-30 21:47:20 +0300580 if (hdev->commands[6] & 0x20 &&
581 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Marcel Holtmann48ce62c2015-01-12 09:21:26 -0800582 struct hci_cp_read_stored_link_key cp;
583
584 bacpy(&cp.bdaddr, BDADDR_ANY);
585 cp.read_all = 0x01;
586 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
587 }
588
Johan Hedberg2177bab2013-03-05 20:37:43 +0200589 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200590 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200591
Marcel Holtmann417287d2014-12-11 20:21:54 +0100592 if (hdev->commands[8] & 0x01)
593 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
594
595 /* Some older Broadcom based Bluetooth 1.2 controllers do not
596 * support the Read Page Scan Type command. Check support for
597 * this command in the bit mask of supported commands.
598 */
599 if (hdev->commands[13] & 0x01)
600 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
601
Andre Guedes9193c6e2014-07-01 18:10:09 -0300602 if (lmp_le_capable(hdev)) {
603 u8 events[8];
604
605 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +0200606
607 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
608 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -0300609
610 /* If controller supports the Connection Parameters Request
611 * Link Layer Procedure, enable the corresponding event.
612 */
613 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
614 events[0] |= 0x20; /* LE Remote Connection
615 * Parameter Request
616 */
617
Marcel Holtmanna9f60682014-12-20 16:28:39 +0100618 /* If the controller supports the Data Length Extension
619 * feature, enable the corresponding event.
620 */
621 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
622 events[0] |= 0x40; /* LE Data Length Change */
623
Marcel Holtmann4b71bba2014-12-05 16:20:12 +0100624 /* If the controller supports Extended Scanner Filter
625 * Policies, enable the correspondig event.
626 */
627 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
628 events[1] |= 0x04; /* LE Direct Advertising
629 * Report
630 */
631
Marcel Holtmann7d26f5c2015-11-01 09:39:51 +0100632 /* If the controller supports the LE Set Scan Enable command,
633 * enable the corresponding advertising report event.
634 */
635 if (hdev->commands[26] & 0x08)
636 events[0] |= 0x02; /* LE Advertising Report */
637
638 /* If the controller supports the LE Create Connection
639 * command, enable the corresponding event.
640 */
641 if (hdev->commands[26] & 0x10)
642 events[0] |= 0x01; /* LE Connection Complete */
643
644 /* If the controller supports the LE Connection Update
645 * command, enable the corresponding event.
646 */
647 if (hdev->commands[27] & 0x04)
648 events[0] |= 0x04; /* LE Connection Update
649 * Complete
650 */
651
652 /* If the controller supports the LE Read Remote Used Features
653 * command, enable the corresponding event.
654 */
655 if (hdev->commands[27] & 0x20)
656 events[0] |= 0x08; /* LE Read Remote Used
657 * Features Complete
658 */
659
Marcel Holtmann5a34bd52014-12-05 16:20:15 +0100660 /* If the controller supports the LE Read Local P-256
661 * Public Key command, enable the corresponding event.
662 */
663 if (hdev->commands[34] & 0x02)
664 events[0] |= 0x80; /* LE Read Local P-256
665 * Public Key Complete
666 */
667
668 /* If the controller supports the LE Generate DHKey
669 * command, enable the corresponding event.
670 */
671 if (hdev->commands[34] & 0x04)
672 events[1] |= 0x01; /* LE Generate DHKey Complete */
673
Andre Guedes9193c6e2014-07-01 18:10:09 -0300674 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
675 events);
676
Marcel Holtmann15a49cc2014-07-12 23:20:50 +0200677 if (hdev->commands[25] & 0x40) {
678 /* Read LE Advertising Channel TX Power */
679 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
680 }
681
Marcel Holtmann2ab216a2015-11-01 09:39:48 +0100682 if (hdev->commands[26] & 0x40) {
683 /* Read LE White List Size */
684 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
685 0, NULL);
686 }
687
688 if (hdev->commands[26] & 0x80) {
689 /* Clear LE White List */
690 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
691 }
692
Marcel Holtmanna9f60682014-12-20 16:28:39 +0100693 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
694 /* Read LE Maximum Data Length */
695 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
696
697 /* Read LE Suggested Default Data Length */
698 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
699 }
700
Johan Hedberg42c6b122013-03-05 20:37:49 +0200701 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -0300702 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300703
704 /* Read features beyond page 1 if available */
705 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
706 struct hci_cp_read_local_ext_features cp;
707
708 cp.page = p;
709 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
710 sizeof(cp), &cp);
711 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200712}
713
/* Stage 4 of controller initialization: optional commands that are only
 * sent when the controller advertises support for them in the supported
 * commands bit mask or its LMP features.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Delete all stored link keys (wildcard address) */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
766
/* Run the full controller initialization sequence (stages 1-4) and, while
 * in setup or config phase, create the debugfs entries.
 *
 * AMP controllers only run the first two stages; the later BR/EDR/LE
 * specific stages are skipped for them.
 *
 * Returns 0 on success or a negative error from __hci_req_sync().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Basic debugfs entries are only created on first-time setup */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
823
/* Minimal init request used by __hci_unconf_init() for controllers that
 * come up unconfigured: optional reset plus the bare identification
 * commands (local version and, if the driver can change it, BD address).
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
841
/* Run the minimal hci_init0_req sequence synchronously for a controller
 * that starts up unconfigured. Skipped entirely for raw devices.
 *
 * Returns 0 on success or a negative error from __hci_req_sync().
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Basic debugfs entries are only created on first-time setup */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
858
Johan Hedberg42c6b122013-03-05 20:37:49 +0200859static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860{
861 __u8 scan = opt;
862
Johan Hedberg42c6b122013-03-05 20:37:49 +0200863 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864
865 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200866 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867}
868
Johan Hedberg42c6b122013-03-05 20:37:49 +0200869static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700870{
871 __u8 auth = opt;
872
Johan Hedberg42c6b122013-03-05 20:37:49 +0200873 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874
875 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200876 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877}
878
Johan Hedberg42c6b122013-03-05 20:37:49 +0200879static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880{
881 __u8 encrypt = opt;
882
Johan Hedberg42c6b122013-03-05 20:37:49 +0200883 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700884
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200885 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200886 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700887}
888
Johan Hedberg42c6b122013-03-05 20:37:49 +0200889static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200890{
891 __le16 policy = cpu_to_le16(opt);
892
Johan Hedberg42c6b122013-03-05 20:37:49 +0200893 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200894
895 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200896 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200897}
898
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900899/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 * Device is held on return. */
901struct hci_dev *hci_dev_get(int index)
902{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200903 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904
905 BT_DBG("%d", index);
906
907 if (index < 0)
908 return NULL;
909
910 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200911 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912 if (d->id == index) {
913 hdev = hci_dev_hold(d);
914 break;
915 }
916 }
917 read_unlock(&hci_dev_list_lock);
918 return hdev;
919}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920
921/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200922
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200923bool hci_discovery_active(struct hci_dev *hdev)
924{
925 struct discovery_state *discov = &hdev->discovery;
926
Andre Guedes6fbe1952012-02-03 17:47:58 -0300927 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300928 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300929 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200930 return true;
931
Andre Guedes6fbe1952012-02-03 17:47:58 -0300932 default:
933 return false;
934 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200935}
936
Johan Hedbergff9ef572012-01-04 14:23:45 +0200937void hci_discovery_set_state(struct hci_dev *hdev, int state)
938{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300939 int old_state = hdev->discovery.state;
940
Johan Hedbergff9ef572012-01-04 14:23:45 +0200941 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
942
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300943 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +0200944 return;
945
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300946 hdev->discovery.state = state;
947
Johan Hedbergff9ef572012-01-04 14:23:45 +0200948 switch (state) {
949 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -0300950 hci_update_background_scan(hdev);
951
Johan Hedbergbb3e0a32014-07-07 13:24:58 +0300952 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -0300953 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200954 break;
955 case DISCOVERY_STARTING:
956 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300957 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200958 mgmt_discovering(hdev, 1);
959 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200960 case DISCOVERY_RESOLVING:
961 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200962 case DISCOVERY_STOPPING:
963 break;
964 }
Johan Hedbergff9ef572012-01-04 14:23:45 +0200965}
966
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300967void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968{
Johan Hedberg30883512012-01-04 14:16:21 +0200969 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200970 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971
Johan Hedberg561aafb2012-01-04 13:31:59 +0200972 list_for_each_entry_safe(p, n, &cache->all, all) {
973 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200974 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200976
977 INIT_LIST_HEAD(&cache->unknown);
978 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979}
980
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300981struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
982 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983{
Johan Hedberg30883512012-01-04 14:16:21 +0200984 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985 struct inquiry_entry *e;
986
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300987 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988
Johan Hedberg561aafb2012-01-04 13:31:59 +0200989 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200991 return e;
992 }
993
994 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995}
996
Johan Hedberg561aafb2012-01-04 13:31:59 +0200997struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300998 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200999{
Johan Hedberg30883512012-01-04 14:16:21 +02001000 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001001 struct inquiry_entry *e;
1002
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001003 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001004
1005 list_for_each_entry(e, &cache->unknown, list) {
1006 if (!bacmp(&e->data.bdaddr, bdaddr))
1007 return e;
1008 }
1009
1010 return NULL;
1011}
1012
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001013struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001014 bdaddr_t *bdaddr,
1015 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001016{
1017 struct discovery_state *cache = &hdev->discovery;
1018 struct inquiry_entry *e;
1019
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001020 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001021
1022 list_for_each_entry(e, &cache->resolve, list) {
1023 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1024 return e;
1025 if (!bacmp(&e->data.bdaddr, bdaddr))
1026 return e;
1027 }
1028
1029 return NULL;
1030}
1031
Johan Hedberga3d4e202012-01-09 00:53:02 +02001032void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001033 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001034{
1035 struct discovery_state *cache = &hdev->discovery;
1036 struct list_head *pos = &cache->resolve;
1037 struct inquiry_entry *p;
1038
1039 list_del(&ie->list);
1040
1041 list_for_each_entry(p, &cache->resolve, list) {
1042 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001043 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001044 break;
1045 pos = &p->list;
1046 }
1047
1048 list_add(&ie->list, pos);
1049}
1050
/* Insert or refresh an inquiry cache entry for a discovered device.
 *
 * Returns MGMT_DEV_FOUND_* flags describing the result:
 * MGMT_DEV_FOUND_LEGACY_PAIRING when the device (currently or in the
 * cached entry) did not use SSP, and MGMT_DEV_FOUND_CONFIRM_NAME when
 * the device name still needs to be confirmed (or the entry could not
 * be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A newly discovered device invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change re-sorts a pending entry on the
		 * resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry once the name becomes known (unless a name
	 * request is already pending) and drop it from the sub-list.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1112
1113static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1114{
Johan Hedberg30883512012-01-04 14:16:21 +02001115 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116 struct inquiry_info *info = (struct inquiry_info *) buf;
1117 struct inquiry_entry *e;
1118 int copied = 0;
1119
Johan Hedberg561aafb2012-01-04 13:31:59 +02001120 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001122
1123 if (copied >= num)
1124 break;
1125
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 bacpy(&info->bdaddr, &data->bdaddr);
1127 info->pscan_rep_mode = data->pscan_rep_mode;
1128 info->pscan_period_mode = data->pscan_period_mode;
1129 info->pscan_mode = data->pscan_mode;
1130 memcpy(info->dev_class, data->dev_class, 3);
1131 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001132
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001134 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135 }
1136
1137 BT_DBG("cache %p, copied %d", cache, copied);
1138 return copied;
1139}
1140
Johan Hedberg42c6b122013-03-05 20:37:49 +02001141static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142{
1143 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001144 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145 struct hci_cp_inquiry cp;
1146
1147 BT_DBG("%s", hdev->name);
1148
1149 if (test_bit(HCI_INQUIRY, &hdev->flags))
1150 return;
1151
1152 /* Start Inquiry */
1153 memcpy(&cp.lap, &ir->lap, 3);
1154 cp.length = ir->length;
1155 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001156 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157}
1158
1159int hci_inquiry(void __user *arg)
1160{
1161 __u8 __user *ptr = arg;
1162 struct hci_inquiry_req ir;
1163 struct hci_dev *hdev;
1164 int err = 0, do_inquiry = 0, max_rsp;
1165 long timeo;
1166 __u8 *buf;
1167
1168 if (copy_from_user(&ir, ptr, sizeof(ir)))
1169 return -EFAULT;
1170
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001171 hdev = hci_dev_get(ir.dev_id);
1172 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 return -ENODEV;
1174
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001175 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001176 err = -EBUSY;
1177 goto done;
1178 }
1179
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001180 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001181 err = -EOPNOTSUPP;
1182 goto done;
1183 }
1184
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001185 if (hdev->dev_type != HCI_BREDR) {
1186 err = -EOPNOTSUPP;
1187 goto done;
1188 }
1189
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001190 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001191 err = -EOPNOTSUPP;
1192 goto done;
1193 }
1194
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001195 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001196 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001197 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001198 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 do_inquiry = 1;
1200 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001201 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202
Marcel Holtmann04837f62006-07-03 10:02:33 +02001203 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001204
1205 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001206 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001207 timeo, NULL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001208 if (err < 0)
1209 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001210
1211 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1212 * cleared). If it is interrupted by a signal, return -EINTR.
1213 */
NeilBrown74316202014-07-07 15:16:04 +10001214 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001215 TASK_INTERRUPTIBLE))
1216 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001217 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001219 /* for unlimited number of responses we will use buffer with
1220 * 255 entries
1221 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1223
1224 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1225 * copy it to the user space.
1226 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001227 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001228 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 err = -ENOMEM;
1230 goto done;
1231 }
1232
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001233 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001235 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236
1237 BT_DBG("num_rsp %d", ir.num_rsp);
1238
1239 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1240 ptr += sizeof(ir);
1241 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001242 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001244 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245 err = -EFAULT;
1246
1247 kfree(buf);
1248
1249done:
1250 hci_dev_put(hdev);
1251 return err;
1252}
1253
/* Power on an HCI device: run the pre-open policy checks, call the
 * driver's open callback, perform the setup/config/init phases and,
 * on success, mark the device HCI_UP (notifying mgmt where needed).
 * On any init failure, the device is fully torn down again.
 *
 * Serialized via hci_req_sync_lock(). Returns 0 on success or a
 * negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Hand over to the transport driver */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Full init only for configured, kernel-managed devices;
		 * user channel devices get raw access instead.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}
1416
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001417/* ---- HCI ioctl helpers ---- */
1418
/* Power on an HCI device on behalf of the HCIDEVUP ioctl (and the
 * power-on work path).
 *
 * Takes a reference on the device for the duration of the call and
 * funnels the actual bring-up through hci_dev_do_open().
 *
 * Returns 0 on success or a negative errno: -ENODEV if the index is
 * unknown, -EOPNOTSUPP for unconfigured controllers that are not being
 * opened as a user channel, or whatever hci_dev_do_open() reports.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1473
Johan Hedbergd7347f32014-07-04 12:37:23 +03001474/* This function requires the caller holds hdev->lock */
1475static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1476{
1477 struct hci_conn_params *p;
1478
Johan Hedbergf161dd42014-08-15 21:06:54 +03001479 list_for_each_entry(p, &hdev->le_conn_params, list) {
1480 if (p->conn) {
1481 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001482 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001483 p->conn = NULL;
1484 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001485 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001486 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001487
1488 BT_DBG("All LE pending actions cleared");
1489}
1490
/* Power down an HCI device.
 *
 * This is the single teardown path shared by the HCIDEVDOWN ioctl,
 * rfkill, the delayed power-off work and error recovery. The sequence
 * is strictly ordered: vendor shutdown, cancel/flush all pending work,
 * notify mgmt, flush caches and connections, optionally issue HCI
 * Reset, drain the queues and finally call the driver's close().
 *
 * Returns 0 (also when the device was already down).
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_sync_cancel(hdev, ENODEV);
	hci_req_sync_lock(hdev);

	/* Device was not up: nothing to tear down beyond the cmd timer */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and drop the flags */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	/* Only tell mgmt about a real power-down, not the auto-off case */
	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_request_cancel_all(hdev);

	hci_dev_put(hdev);
	return 0;
}
1617
1618int hci_dev_close(__u16 dev)
1619{
1620 struct hci_dev *hdev;
1621 int err;
1622
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001623 hdev = hci_dev_get(dev);
1624 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001626
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001627 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001628 err = -EBUSY;
1629 goto done;
1630 }
1631
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001632 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001633 cancel_delayed_work(&hdev->power_off);
1634
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001636
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001637done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 hci_dev_put(hdev);
1639 return err;
1640}
1641
/* Perform the actual controller reset for hci_dev_reset().
 *
 * While holding the synchronous request lock: drop all queued packets,
 * flush the inquiry cache and connection hash, call the driver's
 * flush() and issue the HCI Reset command synchronously.
 *
 * Returns the result of the synchronous HCI Reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command and zero the flow-control counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1675
Marcel Holtmann5c912492015-01-28 11:53:05 -08001676int hci_dev_reset(__u16 dev)
1677{
1678 struct hci_dev *hdev;
1679 int err;
1680
1681 hdev = hci_dev_get(dev);
1682 if (!hdev)
1683 return -ENODEV;
1684
1685 if (!test_bit(HCI_UP, &hdev->flags)) {
1686 err = -ENETDOWN;
1687 goto done;
1688 }
1689
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001690 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001691 err = -EBUSY;
1692 goto done;
1693 }
1694
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001695 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001696 err = -EOPNOTSUPP;
1697 goto done;
1698 }
1699
1700 err = hci_dev_do_reset(hdev);
1701
1702done:
1703 hci_dev_put(hdev);
1704 return err;
1705}
1706
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707int hci_dev_reset_stat(__u16 dev)
1708{
1709 struct hci_dev *hdev;
1710 int ret = 0;
1711
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001712 hdev = hci_dev_get(dev);
1713 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 return -ENODEV;
1715
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001716 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001717 ret = -EBUSY;
1718 goto done;
1719 }
1720
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001721 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001722 ret = -EOPNOTSUPP;
1723 goto done;
1724 }
1725
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1727
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001728done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 return ret;
1731}
1732
Johan Hedberg123abc02014-07-10 12:09:07 +03001733static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1734{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001735 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001736
1737 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1738
1739 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001740 conn_changed = !hci_dev_test_and_set_flag(hdev,
1741 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001742 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001743 conn_changed = hci_dev_test_and_clear_flag(hdev,
1744 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001745
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001746 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001747 discov_changed = !hci_dev_test_and_set_flag(hdev,
1748 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001749 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001750 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001751 discov_changed = hci_dev_test_and_clear_flag(hdev,
1752 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001753 }
1754
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001755 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001756 return;
1757
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001758 if (conn_changed || discov_changed) {
1759 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001760 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001761
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001762 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001763 mgmt_update_adv_data(hdev);
1764
Johan Hedberg123abc02014-07-10 12:09:07 +03001765 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001766 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001767}
1768
/* Dispatcher for the legacy HCISET* device ioctls (auth, encrypt,
 * scan, link policy/mode, packet type, ACL/SCO MTU).
 *
 * Copies a struct hci_dev_req from userspace, validates the device
 * state (no user channel owner, configured, BR/EDR and enabled) and
 * then either issues a synchronous HCI request or updates the hdev
 * fields directly.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two 16-bit values that are
	 * extracted by pointer arithmetic; which half is the MTU and
	 * which the packet count depends on host byte order — NOTE
	 * (review): this matches the historical ABI, do not "fix" it.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1870
1871int hci_get_dev_list(void __user *arg)
1872{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001873 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 struct hci_dev_list_req *dl;
1875 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 int n = 0, size, err;
1877 __u16 dev_num;
1878
1879 if (get_user(dev_num, (__u16 __user *) arg))
1880 return -EFAULT;
1881
1882 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1883 return -EINVAL;
1884
1885 size = sizeof(*dl) + dev_num * sizeof(*dr);
1886
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001887 dl = kzalloc(size, GFP_KERNEL);
1888 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 return -ENOMEM;
1890
1891 dr = dl->dev_req;
1892
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001893 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001894 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001895 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001896
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001897 /* When the auto-off is configured it means the transport
1898 * is running, but in that case still indicate that the
1899 * device is actually down.
1900 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001901 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001902 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02001903
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001905 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001906
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 if (++n >= dev_num)
1908 break;
1909 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001910 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911
1912 dl->dev_num = n;
1913 size = sizeof(*dl) + n * sizeof(*dr);
1914
1915 err = copy_to_user(arg, dl, size);
1916 kfree(dl);
1917
1918 return err ? -EFAULT : 0;
1919}
1920
1921int hci_get_dev_info(void __user *arg)
1922{
1923 struct hci_dev *hdev;
1924 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001925 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 int err = 0;
1927
1928 if (copy_from_user(&di, arg, sizeof(di)))
1929 return -EFAULT;
1930
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001931 hdev = hci_dev_get(di.dev_id);
1932 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 return -ENODEV;
1934
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001935 /* When the auto-off is configured it means the transport
1936 * is running, but in that case still indicate that the
1937 * device is actually down.
1938 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001939 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001940 flags = hdev->flags & ~BIT(HCI_UP);
1941 else
1942 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001943
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 strcpy(di.name, hdev->name);
1945 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001946 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001947 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001949 if (lmp_bredr_capable(hdev)) {
1950 di.acl_mtu = hdev->acl_mtu;
1951 di.acl_pkts = hdev->acl_pkts;
1952 di.sco_mtu = hdev->sco_mtu;
1953 di.sco_pkts = hdev->sco_pkts;
1954 } else {
1955 di.acl_mtu = hdev->le_mtu;
1956 di.acl_pkts = hdev->le_pkts;
1957 di.sco_mtu = 0;
1958 di.sco_pkts = 0;
1959 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 di.link_policy = hdev->link_policy;
1961 di.link_mode = hdev->link_mode;
1962
1963 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1964 memcpy(&di.features, &hdev->features, sizeof(di.features));
1965
1966 if (copy_to_user(arg, &di, sizeof(di)))
1967 err = -EFAULT;
1968
1969 hci_dev_put(hdev);
1970
1971 return err;
1972}
1973
1974/* ---- Interface to HCI drivers ---- */
1975
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001976static int hci_rfkill_set_block(void *data, bool blocked)
1977{
1978 struct hci_dev *hdev = data;
1979
1980 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1981
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001982 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001983 return -EBUSY;
1984
Johan Hedberg5e130362013-09-13 08:58:17 +03001985 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001986 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001987 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1988 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03001989 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001990 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001991 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001992 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001993
1994 return 0;
1995}
1996
/* rfkill operations for HCI controllers; only set_block is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2000
/* Work callback that powers a controller on (queued e.g. from mgmt or
 * at registration time).
 *
 * Opens the device, re-checks the error conditions that were ignored
 * during setup (rfkill, unconfigured, missing address) and performs
 * the HCI_SETUP/HCI_CONFIG flag transitions, announcing the index to
 * mgmt where appropriate.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2061
2062static void hci_power_off(struct work_struct *work)
2063{
Johan Hedberg32435532011-11-07 22:16:04 +02002064 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002065 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002066
2067 BT_DBG("%s", hdev->name);
2068
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002069 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002070}
2071
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002072static void hci_error_reset(struct work_struct *work)
2073{
2074 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2075
2076 BT_DBG("%s", hdev->name);
2077
2078 if (hdev->hw_error)
2079 hdev->hw_error(hdev, hdev->hw_error_code);
2080 else
2081 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2082 hdev->hw_error_code);
2083
2084 if (hci_dev_do_close(hdev))
2085 return;
2086
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002087 hci_dev_do_open(hdev);
2088}
2089
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002090static void hci_discov_off(struct work_struct *work)
2091{
2092 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002093
2094 hdev = container_of(work, struct hci_dev, discov_off.work);
2095
2096 BT_DBG("%s", hdev->name);
2097
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002098 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002099}
2100
Florian Grandel5d900e42015-06-18 03:16:35 +02002101static void hci_adv_timeout_expire(struct work_struct *work)
2102{
2103 struct hci_dev *hdev;
2104
2105 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2106
2107 BT_DBG("%s", hdev->name);
2108
2109 mgmt_adv_timeout_expired(hdev);
2110}
2111
Johan Hedberg35f74982014-02-18 17:14:32 +02002112void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002113{
Johan Hedberg48210022013-01-27 00:31:28 +02002114 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002115
Johan Hedberg48210022013-01-27 00:31:28 +02002116 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2117 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002118 kfree(uuid);
2119 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002120}
2121
Johan Hedberg35f74982014-02-18 17:14:32 +02002122void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002123{
Johan Hedberg0378b592014-11-19 15:22:22 +02002124 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002125
Johan Hedberg0378b592014-11-19 15:22:22 +02002126 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2127 list_del_rcu(&key->list);
2128 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002129 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002130}
2131
Johan Hedberg35f74982014-02-18 17:14:32 +02002132void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002133{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002134 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002135
Johan Hedberg970d0f12014-11-13 14:37:47 +02002136 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2137 list_del_rcu(&k->list);
2138 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002139 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002140}
2141
Johan Hedberg970c4e42014-02-18 10:19:33 +02002142void hci_smp_irks_clear(struct hci_dev *hdev)
2143{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002144 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002145
Johan Hedbergadae20c2014-11-13 14:37:48 +02002146 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2147 list_del_rcu(&k->list);
2148 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002149 }
2150}
2151
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002152struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2153{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002154 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002155
Johan Hedberg0378b592014-11-19 15:22:22 +02002156 rcu_read_lock();
2157 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2158 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2159 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002160 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002161 }
2162 }
2163 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002164
2165 return NULL;
2166}
2167
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302168static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002169 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002170{
2171 /* Legacy key */
2172 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302173 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002174
2175 /* Debug keys are insecure so don't store them persistently */
2176 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302177 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002178
2179 /* Changed combination key and there's no previous one */
2180 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302181 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002182
2183 /* Security mode 3 case */
2184 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302185 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002186
Johan Hedberge3befab2014-06-01 16:33:39 +03002187 /* BR/EDR key derived using SC from an LE link */
2188 if (conn->type == LE_LINK)
2189 return true;
2190
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002191 /* Neither local nor remote side had no-bonding as requirement */
2192 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302193 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002194
2195 /* Local side had dedicated bonding as requirement */
2196 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302197 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002198
2199 /* Remote side had dedicated bonding as requirement */
2200 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302201 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002202
2203 /* If none of the above criteria match, then don't store the key
2204 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302205 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002206}
2207
Johan Hedberge804d252014-07-16 11:42:28 +03002208static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002209{
Johan Hedberge804d252014-07-16 11:42:28 +03002210 if (type == SMP_LTK)
2211 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002212
Johan Hedberge804d252014-07-16 11:42:28 +03002213 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002214}
2215
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002216struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2217 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002218{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002219 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002220
Johan Hedberg970d0f12014-11-13 14:37:47 +02002221 rcu_read_lock();
2222 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002223 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2224 continue;
2225
Johan Hedberg923e2412014-12-03 12:43:39 +02002226 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002227 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002228 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002229 }
2230 }
2231 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002232
2233 return NULL;
2234}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002235
Johan Hedberg970c4e42014-02-18 10:19:33 +02002236struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2237{
2238 struct smp_irk *irk;
2239
Johan Hedbergadae20c2014-11-13 14:37:48 +02002240 rcu_read_lock();
2241 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2242 if (!bacmp(&irk->rpa, rpa)) {
2243 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002244 return irk;
2245 }
2246 }
2247
Johan Hedbergadae20c2014-11-13 14:37:48 +02002248 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2249 if (smp_irk_matches(hdev, irk->val, rpa)) {
2250 bacpy(&irk->rpa, rpa);
2251 rcu_read_unlock();
2252 return irk;
2253 }
2254 }
2255 rcu_read_unlock();
2256
Johan Hedberg970c4e42014-02-18 10:19:33 +02002257 return NULL;
2258}
2259
/* Look up the identity resolving key stored for an identity address.
 *
 * Returns NULL immediately for random addresses that cannot be
 * identity addresses, otherwise searches the IRK list under RCU.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random; static random
	 * addresses have the two most significant bits set (0xc0).
	 */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2281
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002282struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002283 bdaddr_t *bdaddr, u8 *val, u8 type,
2284 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002285{
2286 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302287 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002288
2289 old_key = hci_find_link_key(hdev, bdaddr);
2290 if (old_key) {
2291 old_key_type = old_key->type;
2292 key = old_key;
2293 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002294 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002295 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002296 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002297 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02002298 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002299 }
2300
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002301 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002302
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002303 /* Some buggy controller combinations generate a changed
2304 * combination key for legacy pairing even when there's no
2305 * previous key */
2306 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002307 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002308 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002309 if (conn)
2310 conn->key_type = type;
2311 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002312
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002313 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002314 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002315 key->pin_len = pin_len;
2316
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002317 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002318 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002319 else
2320 key->type = type;
2321
Johan Hedberg7652ff62014-06-24 13:15:49 +03002322 if (persistent)
2323 *persistent = hci_persistent_key(hdev, conn, type,
2324 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002325
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002326 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002327}
2328
Johan Hedbergca9142b2014-02-19 14:57:44 +02002329struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002330 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002331 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002332{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002333 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002334 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002335
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002336 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002337 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002338 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002339 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002340 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002341 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002342 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002343 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002344 }
2345
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002346 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002347 key->bdaddr_type = addr_type;
2348 memcpy(key->val, tk, sizeof(key->val));
2349 key->authenticated = authenticated;
2350 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002351 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002352 key->enc_size = enc_size;
2353 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002354
Johan Hedbergca9142b2014-02-19 14:57:44 +02002355 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002356}
2357
Johan Hedbergca9142b2014-02-19 14:57:44 +02002358struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2359 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002360{
2361 struct smp_irk *irk;
2362
2363 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2364 if (!irk) {
2365 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2366 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002367 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002368
2369 bacpy(&irk->bdaddr, bdaddr);
2370 irk->addr_type = addr_type;
2371
Johan Hedbergadae20c2014-11-13 14:37:48 +02002372 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002373 }
2374
2375 memcpy(irk->val, val, 16);
2376 bacpy(&irk->rpa, rpa);
2377
Johan Hedbergca9142b2014-02-19 14:57:44 +02002378 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002379}
2380
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002381int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2382{
2383 struct link_key *key;
2384
2385 key = hci_find_link_key(hdev, bdaddr);
2386 if (!key)
2387 return -ENOENT;
2388
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002389 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002390
Johan Hedberg0378b592014-11-19 15:22:22 +02002391 list_del_rcu(&key->list);
2392 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002393
2394 return 0;
2395}
2396
Johan Hedberge0b2b272014-02-18 17:14:31 +02002397int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002398{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002399 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002400 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002401
Johan Hedberg970d0f12014-11-13 14:37:47 +02002402 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002403 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002404 continue;
2405
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002406 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002407
Johan Hedberg970d0f12014-11-13 14:37:47 +02002408 list_del_rcu(&k->list);
2409 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002410 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002411 }
2412
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002413 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002414}
2415
Johan Hedberga7ec7332014-02-18 17:14:35 +02002416void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2417{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002418 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002419
Johan Hedbergadae20c2014-11-13 14:37:48 +02002420 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002421 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2422 continue;
2423
2424 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2425
Johan Hedbergadae20c2014-11-13 14:37:48 +02002426 list_del_rcu(&k->list);
2427 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002428 }
2429}
2430
Johan Hedberg55e76b32015-03-10 22:34:40 +02002431bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2432{
2433 struct smp_ltk *k;
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002434 struct smp_irk *irk;
Johan Hedberg55e76b32015-03-10 22:34:40 +02002435 u8 addr_type;
2436
2437 if (type == BDADDR_BREDR) {
2438 if (hci_find_link_key(hdev, bdaddr))
2439 return true;
2440 return false;
2441 }
2442
2443 /* Convert to HCI addr type which struct smp_ltk uses */
2444 if (type == BDADDR_LE_PUBLIC)
2445 addr_type = ADDR_LE_DEV_PUBLIC;
2446 else
2447 addr_type = ADDR_LE_DEV_RANDOM;
2448
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002449 irk = hci_get_irk(hdev, bdaddr, addr_type);
2450 if (irk) {
2451 bdaddr = &irk->bdaddr;
2452 addr_type = irk->addr_type;
2453 }
2454
Johan Hedberg55e76b32015-03-10 22:34:40 +02002455 rcu_read_lock();
2456 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg87c8b282015-03-11 08:55:51 +02002457 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2458 rcu_read_unlock();
Johan Hedberg55e76b32015-03-10 22:34:40 +02002459 return true;
Johan Hedberg87c8b282015-03-11 08:55:51 +02002460 }
Johan Hedberg55e76b32015-03-10 22:34:40 +02002461 }
2462 rcu_read_unlock();
2463
2464 return false;
2465}
2466
Ville Tervo6bd32322011-02-16 16:32:41 +02002467/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002468static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002469{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002470 struct hci_dev *hdev = container_of(work, struct hci_dev,
2471 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002472
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002473 if (hdev->sent_cmd) {
2474 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2475 u16 opcode = __le16_to_cpu(sent->opcode);
2476
2477 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2478 } else {
2479 BT_ERR("%s command tx timeout", hdev->name);
2480 }
2481
Ville Tervo6bd32322011-02-16 16:32:41 +02002482 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002483 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002484}
2485
Szymon Janc2763eda2011-03-22 13:12:22 +01002486struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002487 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002488{
2489 struct oob_data *data;
2490
Johan Hedberg6928a922014-10-26 20:46:09 +01002491 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2492 if (bacmp(bdaddr, &data->bdaddr) != 0)
2493 continue;
2494 if (data->bdaddr_type != bdaddr_type)
2495 continue;
2496 return data;
2497 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002498
2499 return NULL;
2500}
2501
Johan Hedberg6928a922014-10-26 20:46:09 +01002502int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2503 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002504{
2505 struct oob_data *data;
2506
Johan Hedberg6928a922014-10-26 20:46:09 +01002507 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002508 if (!data)
2509 return -ENOENT;
2510
Johan Hedberg6928a922014-10-26 20:46:09 +01002511 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002512
2513 list_del(&data->list);
2514 kfree(data);
2515
2516 return 0;
2517}
2518
Johan Hedberg35f74982014-02-18 17:14:32 +02002519void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002520{
2521 struct oob_data *data, *n;
2522
2523 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2524 list_del(&data->list);
2525 kfree(data);
2526 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002527}
2528
Marcel Holtmann07988722014-01-10 02:07:29 -08002529int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002530 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d52014-10-26 20:33:47 +01002531 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01002532{
2533 struct oob_data *data;
2534
Johan Hedberg6928a922014-10-26 20:46:09 +01002535 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002536 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002537 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002538 if (!data)
2539 return -ENOMEM;
2540
2541 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01002542 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01002543 list_add(&data->list, &hdev->remote_oob_data);
2544 }
2545
Johan Hedberg81328d52014-10-26 20:33:47 +01002546 if (hash192 && rand192) {
2547 memcpy(data->hash192, hash192, sizeof(data->hash192));
2548 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002549 if (hash256 && rand256)
2550 data->present = 0x03;
Johan Hedberg81328d52014-10-26 20:33:47 +01002551 } else {
2552 memset(data->hash192, 0, sizeof(data->hash192));
2553 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002554 if (hash256 && rand256)
2555 data->present = 0x02;
2556 else
2557 data->present = 0x00;
Marcel Holtmann07988722014-01-10 02:07:29 -08002558 }
2559
Johan Hedberg81328d52014-10-26 20:33:47 +01002560 if (hash256 && rand256) {
2561 memcpy(data->hash256, hash256, sizeof(data->hash256));
2562 memcpy(data->rand256, rand256, sizeof(data->rand256));
2563 } else {
2564 memset(data->hash256, 0, sizeof(data->hash256));
2565 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002566 if (hash192 && rand192)
2567 data->present = 0x01;
Johan Hedberg81328d52014-10-26 20:33:47 +01002568 }
Marcel Holtmann07988722014-01-10 02:07:29 -08002569
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002570 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002571
2572 return 0;
2573}
2574
Florian Grandeld2609b32015-06-18 03:16:34 +02002575/* This function requires the caller holds hdev->lock */
2576struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2577{
2578 struct adv_info *adv_instance;
2579
2580 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2581 if (adv_instance->instance == instance)
2582 return adv_instance;
2583 }
2584
2585 return NULL;
2586}
2587
2588/* This function requires the caller holds hdev->lock */
2589struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2590 struct adv_info *cur_instance;
2591
2592 cur_instance = hci_find_adv_instance(hdev, instance);
2593 if (!cur_instance)
2594 return NULL;
2595
2596 if (cur_instance == list_last_entry(&hdev->adv_instances,
2597 struct adv_info, list))
2598 return list_first_entry(&hdev->adv_instances,
2599 struct adv_info, list);
2600 else
2601 return list_next_entry(cur_instance, list);
2602}
2603
2604/* This function requires the caller holds hdev->lock */
2605int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2606{
2607 struct adv_info *adv_instance;
2608
2609 adv_instance = hci_find_adv_instance(hdev, instance);
2610 if (!adv_instance)
2611 return -ENOENT;
2612
2613 BT_DBG("%s removing %dMR", hdev->name, instance);
2614
Florian Grandel5d900e42015-06-18 03:16:35 +02002615 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2616 cancel_delayed_work(&hdev->adv_instance_expire);
2617 hdev->adv_instance_timeout = 0;
2618 }
2619
Florian Grandeld2609b32015-06-18 03:16:34 +02002620 list_del(&adv_instance->list);
2621 kfree(adv_instance);
2622
2623 hdev->adv_instance_cnt--;
2624
2625 return 0;
2626}
2627
/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	/* Stop a pending instance-expiry timer before freeing the
	 * instances it refers to.
	 */
	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
}
2645
2646/* This function requires the caller holds hdev->lock */
2647int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2648 u16 adv_data_len, u8 *adv_data,
2649 u16 scan_rsp_len, u8 *scan_rsp_data,
2650 u16 timeout, u16 duration)
2651{
2652 struct adv_info *adv_instance;
2653
2654 adv_instance = hci_find_adv_instance(hdev, instance);
2655 if (adv_instance) {
2656 memset(adv_instance->adv_data, 0,
2657 sizeof(adv_instance->adv_data));
2658 memset(adv_instance->scan_rsp_data, 0,
2659 sizeof(adv_instance->scan_rsp_data));
2660 } else {
2661 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2662 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2663 return -EOVERFLOW;
2664
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002665 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002666 if (!adv_instance)
2667 return -ENOMEM;
2668
Florian Grandelfffd38b2015-06-18 03:16:47 +02002669 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002670 adv_instance->instance = instance;
2671 list_add(&adv_instance->list, &hdev->adv_instances);
2672 hdev->adv_instance_cnt++;
2673 }
2674
2675 adv_instance->flags = flags;
2676 adv_instance->adv_data_len = adv_data_len;
2677 adv_instance->scan_rsp_len = scan_rsp_len;
2678
2679 if (adv_data_len)
2680 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2681
2682 if (scan_rsp_len)
2683 memcpy(adv_instance->scan_rsp_data,
2684 scan_rsp_data, scan_rsp_len);
2685
2686 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002687 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002688
2689 if (duration == 0)
2690 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2691 else
2692 adv_instance->duration = duration;
2693
2694 BT_DBG("%s for %dMR", hdev->name, instance);
2695
2696 return 0;
2697}
2698
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002699struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002700 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002701{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002702 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002703
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002704 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002705 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002706 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002707 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002708
2709 return NULL;
2710}
2711
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002712void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002713{
2714 struct list_head *p, *n;
2715
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002716 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002717 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002718
2719 list_del(p);
2720 kfree(b);
2721 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002722}
2723
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002724int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002725{
2726 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002727
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002728 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002729 return -EBADF;
2730
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002731 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002732 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002733
Johan Hedberg27f70f32014-07-21 10:50:06 +03002734 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002735 if (!entry)
2736 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002737
2738 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002739 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002740
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002741 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002742
2743 return 0;
2744}
2745
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002746int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002747{
2748 struct bdaddr_list *entry;
2749
Johan Hedberg35f74982014-02-18 17:14:32 +02002750 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002751 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002752 return 0;
2753 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002754
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002755 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002756 if (!entry)
2757 return -ENOENT;
2758
2759 list_del(&entry->list);
2760 kfree(entry);
2761
2762 return 0;
2763}
2764
Andre Guedes15819a72014-02-03 13:56:18 -03002765/* This function requires the caller holds hdev->lock */
2766struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2767 bdaddr_t *addr, u8 addr_type)
2768{
2769 struct hci_conn_params *params;
2770
2771 list_for_each_entry(params, &hdev->le_conn_params, list) {
2772 if (bacmp(&params->addr, addr) == 0 &&
2773 params->addr_type == addr_type) {
2774 return params;
2775 }
2776 }
2777
2778 return NULL;
2779}
2780
/* This function requires the caller holds hdev->lock.
 *
 * Search a pending-action list for the connection parameters entry
 * matching @addr/@addr_type. Note that entries on these lists are
 * linked through their ->action member, not the main ->list member.
 */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}
2795
/* This function requires the caller holds hdev->lock.
 *
 * Look up or create the connection-parameter entry for the given LE
 * address/type. Returns the existing entry if one is already present,
 * a freshly allocated entry seeded with the controller defaults, or
 * NULL on allocation failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* Avoid duplicates: reuse an existing entry for this address */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not on any action list yet; init so list_del() is always safe */
	INIT_LIST_HEAD(&params->action);

	/* Seed per-connection parameters from the controller defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
2828
/* Release a connection-parameter entry: drop any references it holds on
 * an attached connection, unlink it from both lists and free it.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	/* Release the connection references before freeing the entry */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	/* Unlink from the action list as well as the main params list */
	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2840
Andre Guedes15819a72014-02-03 13:56:18 -03002841/* This function requires the caller holds hdev->lock */
2842void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2843{
2844 struct hci_conn_params *params;
2845
2846 params = hci_conn_params_lookup(hdev, addr, addr_type);
2847 if (!params)
2848 return;
2849
Johan Hedbergf6c63242014-08-15 21:06:59 +03002850 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002851
Johan Hedberg95305ba2014-07-04 12:37:21 +03002852 hci_update_background_scan(hdev);
2853
Andre Guedes15819a72014-02-03 13:56:18 -03002854 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2855}
2856
/* This function requires the caller holds hdev->lock.
 *
 * Drop all connection-parameter entries whose auto_connect policy is
 * DISABLED. Entries with a pending explicit (one-time) connection are
 * kept but demoted to HCI_AUTO_CONN_EXPLICIT so they are removed once
 * that attempt completes.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	/* _safe variant: entries may be deleted while iterating */
	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one-time connection to a
		 * disabled device, leave the params, but mark them as
		 * just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2880
2881/* This function requires the caller holds hdev->lock */
Johan Hedberg030e7f82015-11-10 09:44:53 +02002882static void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002883{
2884 struct hci_conn_params *params, *tmp;
2885
Johan Hedbergf6c63242014-08-15 21:06:59 +03002886 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2887 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002888
2889 BT_DBG("All LE connection parameters were removed");
2890}
2891
Marcel Holtmann1904a852015-01-11 13:50:44 -08002892static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002893{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002894 if (status) {
2895 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002896
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002897 hci_dev_lock(hdev);
2898 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2899 hci_dev_unlock(hdev);
2900 return;
2901 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002902}
2903
/* Completion handler for the "disable LE scan" request. Depending on
 * the active discovery type this either finishes discovery (LE-only)
 * or kicks off the BR/EDR inquiry phase of interleaved discovery.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	/* Scan stopped; forget the start time of the finished scan */
	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished. If we will resolve
			 * remote device name, do not change discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			/* Start fresh: discard stale inquiry results */
			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			/* Follow the LE scan with a classic inquiry */
			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2966
/* Delayed work that turns off LE scanning once the configured scan
 * duration has elapsed.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* A pending restart job would re-enable scanning behind our back */
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2986
/* Completion handler for an LE scan restart. Re-arms the scan-disable
 * timer with the remaining portion of the original scan duration so
 * that restarting the scan does not extend it indefinitely.
 */
static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	/* Only relevant for controllers with the strict duplicate filter
	 * quirk, and only while a timed discovery scan is in progress.
	 */
	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		/* jiffies may have wrapped since the scan started */
		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}
3026
/* Delayed work that restarts an active LE scan by toggling it off and
 * on in a single request. Used to flush the controller's duplicate
 * filter on hardware with the strict-duplicate-filter quirk.
 */
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	/* Disable then immediately re-enable scanning in one request */
	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}
3054
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	/* Use the static random address when it is forced, when there is
	 * no public address, or when BR/EDR is disabled and a static
	 * address has been configured (see comment above).
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
3082
/* Alloc HCI device.
 *
 * Allocates a zeroed struct hci_dev and initializes all defaults,
 * lists, work items and queues. Returns NULL on allocation failure.
 * The caller registers it with hci_register_dev() and releases it with
 * hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR capabilities; refined after feature discovery */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults in controller units
	 * (adv/scan: 0.625 ms, conn interval: 1.25 ms, timeout: 10 ms)
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3174
/* Free HCI device.
 *
 * Drops the device reference taken at allocation; the memory is
 * actually released by the driver-model release callback once the
 * last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3182
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183/* Register HCI device */
3184int hci_register_dev(struct hci_dev *hdev)
3185{
David Herrmannb1b813d2012-04-22 14:39:58 +02003186 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187
Marcel Holtmann74292d52014-07-06 15:50:27 +02003188 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189 return -EINVAL;
3190
Mat Martineau08add512011-11-02 16:18:36 -07003191 /* Do not allow HCI_AMP devices to register at index 0,
3192 * so the index can be used as the AMP controller ID.
3193 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003194 switch (hdev->dev_type) {
3195 case HCI_BREDR:
3196 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3197 break;
3198 case HCI_AMP:
3199 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3200 break;
3201 default:
3202 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003204
Sasha Levin3df92b32012-05-27 22:36:56 +02003205 if (id < 0)
3206 return id;
3207
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208 sprintf(hdev->name, "hci%d", id);
3209 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003210
3211 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3212
Kees Cookd8537542013-07-03 15:04:57 -07003213 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3214 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003215 if (!hdev->workqueue) {
3216 error = -ENOMEM;
3217 goto err;
3218 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003219
Kees Cookd8537542013-07-03 15:04:57 -07003220 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3221 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003222 if (!hdev->req_workqueue) {
3223 destroy_workqueue(hdev->workqueue);
3224 error = -ENOMEM;
3225 goto err;
3226 }
3227
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003228 if (!IS_ERR_OR_NULL(bt_debugfs))
3229 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3230
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003231 dev_set_name(&hdev->dev, "%s", hdev->name);
3232
3233 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003234 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003235 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003237 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003238 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3239 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003240 if (hdev->rfkill) {
3241 if (rfkill_register(hdev->rfkill) < 0) {
3242 rfkill_destroy(hdev->rfkill);
3243 hdev->rfkill = NULL;
3244 }
3245 }
3246
Johan Hedberg5e130362013-09-13 08:58:17 +03003247 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003248 hci_dev_set_flag(hdev, HCI_RFKILLED);
Johan Hedberg5e130362013-09-13 08:58:17 +03003249
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003250 hci_dev_set_flag(hdev, HCI_SETUP);
3251 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003252
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003253 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003254 /* Assume BR/EDR support until proven otherwise (such as
3255 * through reading supported features during init.
3256 */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003257 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg56f87902013-10-02 13:43:13 +03003258 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003259
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003260 write_lock(&hci_dev_list_lock);
3261 list_add(&hdev->list, &hci_dev_list);
3262 write_unlock(&hci_dev_list_lock);
3263
Marcel Holtmann4a964402014-07-02 19:10:33 +02003264 /* Devices that are marked for raw-only usage are unconfigured
3265 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003266 */
3267 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003268 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003269
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003270 hci_sock_dev_event(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003271 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003272
Johan Hedberg19202572013-01-14 22:33:51 +02003273 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003274
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003276
David Herrmann33ca9542011-10-08 14:58:49 +02003277err_wqueue:
3278 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003279 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003280err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003281 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003282
David Herrmann33ca9542011-10-08 14:58:49 +02003283 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284}
3285EXPORT_SYMBOL(hci_register_dev);
3286
/* Unregister HCI device.
 *
 * Tears down a registered controller: closes it, removes it from the
 * global list, tells mgmt and monitor sockets, detaches rfkill, sysfs
 * and debugfs, clears all stored state and finally drops the reference
 * taken by hci_register_dev(). The teardown order mirrors (reverses)
 * the registration order.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all stored per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3351
/* Suspend HCI device.
 *
 * Only notifies monitor/control sockets of the suspend; no controller
 * state is changed here.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3359
/* Resume HCI device.
 *
 * Counterpart to hci_suspend_dev(); only notifies monitor/control
 * sockets of the resume.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3367
Marcel Holtmann75e05692014-11-02 08:15:38 +01003368/* Reset HCI device */
3369int hci_reset_dev(struct hci_dev *hdev)
3370{
3371 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3372 struct sk_buff *skb;
3373
3374 skb = bt_skb_alloc(3, GFP_ATOMIC);
3375 if (!skb)
3376 return -ENOMEM;
3377
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003378 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
Marcel Holtmann75e05692014-11-02 08:15:38 +01003379 memcpy(skb_put(skb, 3), hw_err, 3);
3380
3381 /* Send Hardware Error to upper stack */
3382 return hci_recv_frame(hdev, skb);
3383}
3384EXPORT_SYMBOL(hci_reset_dev);
3385
/* Receive frame from HCI drivers.
 *
 * Validates the packet, time-stamps it and queues it for the RX
 * worker. Consumes the skb in all cases. Returns 0 on success,
 * -ENXIO if the device is not up or initializing, or -EINVAL for an
 * unexpected packet type.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Frames are only accepted while the device is up or in init */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Only event, ACL and SCO packets may come from the driver */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
3414
/* Receive diagnostic message from HCI drivers.
 *
 * Tags the skb as a vendor diagnostic packet, time-stamps it and
 * queues it for the RX worker. Always returns 0.
 */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
3430
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431/* ---- Interface to upper protocols ---- */
3432
/* Register an upper-protocol callback structure.
 *
 * Callbacks are invoked in registration order. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3444
/* Unregister an upper-protocol callback structure previously added
 * with hci_register_cb(). Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3456
/* Hand one outgoing frame to the driver.
 *
 * Time-stamps the skb, mirrors it to the monitor (and, in promiscuous
 * mode, to raw sockets), then passes it to the driver's send callback.
 * Consumes the skb in all cases; errors are only logged.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop the frame if the transport is no longer running */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
3489
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003490/* Send HCI command */
/* Build and queue an HCI command packet for asynchronous transmission.
 *
 * @opcode: combined OGF/OCF command opcode
 * @plen:   length of @param in bytes
 * @param:  command parameter payload (may be NULL when @plen is 0)
 *
 * Returns 0 on success or -ENOMEM if the command skb could not be
 * allocated. The command is queued on cmd_q and sent by cmd_work.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514
3515/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	/* No command currently outstanding. */
	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Only hand back the parameters if the outstanding command
	 * matches the requested opcode (stored little-endian on the
	 * wire, hence the conversion).
	 */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	/* Parameter payload starts right after the command header. */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3532
/* Send HCI command and wait for command complete event */
/* Synchronously send an HCI command while holding the request lock.
 *
 * Returns the event skb from __hci_cmd_sync() on success, or an
 * ERR_PTR — including -ENETDOWN when the device is not up.
 * Callers own the returned skb.
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* Serialize against other synchronous request users. */
	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
3550EXPORT_SYMBOL(hci_cmd_sync);
3551
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552/* Send ACL data */
/* Prepend an ACL data header to @skb.
 *
 * The connection @handle and packet boundary/broadcast @flags are
 * packed into the 12+4 bit handle field; dlen records the payload
 * length before the push. Both fields are stored little-endian.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length, captured before push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3564
/* Add ACL headers to @skb (and any fragments on its frag_list) and
 * append everything to @queue.
 *
 * For BR/EDR controllers the connection handle is used; for AMP
 * controllers the channel handle. A fragmented skb is flattened: the
 * head keeps the caller's @flags, the fragments are re-flagged as
 * continuations (ACL_CONT) and all pieces are queued atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are
	 * handled individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		/* Unknown controller type: drop silently (skb is leaked
		 * to the caller's ownership rules; nothing is queued).
		 */
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Every fragment after the head is a continuation. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3626
/* Queue an ACL packet on the channel's data queue and kick the TX
 * work item so the scheduler picks it up.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637
3638/* Send SCO data */
/* Prepend a SCO header to @skb, queue it on the connection's data
 * queue and schedule the TX work item.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the header before pushing room for it onto the skb. */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658
3659/* ---- HCI TX task (outgoing data) ---- */
3660
3661/* HCI Connection scheduler */
/* Pick the connection of @type with pending data that has the fewest
 * unacknowledged packets (fair round-robin), and compute its TX quota.
 *
 * *@quote receives the number of packets the connection may send this
 * round: the relevant controller buffer count divided by the number of
 * eligible connections, with a minimum of 1; 0 when nothing is ready.
 * Returns the selected connection or NULL.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only consider connections of the right type that
		 * actually have queued data.
		 */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest in-flight packets. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer budget matching the link type. LE falls
		 * back to the ACL budget when the controller reports no
		 * dedicated LE buffers (le_mtu == 0).
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3721
/* Handle a link TX timeout: disconnect every connection of @type that
 * still has unacknowledged packets, on the assumption the controller
 * has stopped returning completed-packet events for them.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3742
/* Select the best HCI channel of link @type to transmit from.
 *
 * Among all channels with queued data on eligible connections, pick
 * from the highest packet priority seen; within that priority class,
 * prefer the connection with the fewest unacknowledged packets.
 * *@quote receives the per-round quota (buffer budget divided by the
 * number of contenders at the winning priority, minimum 1).
 * Returns the chosen channel or NULL when nothing is pending.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters here. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping for the new priority class.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-sent connection wins within the class. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Seen every connection of this type — stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer budget depends on the link type; LE falls back to the
	 * ACL budget when no dedicated LE buffers exist (le_mtu == 0).
	 */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3824
/* Rebalance channel priorities after a scheduling round.
 *
 * Channels that transmitted this round get their sent counter reset;
 * channels that were starved (nothing sent, data still queued) have
 * their head packet promoted to HCI_PRIO_MAX - 1 so they win the next
 * round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got to send: clear its counter
			 * and leave its priority alone.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling. */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Seen every connection of this type — stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3874
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003875static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3876{
3877 /* Calculate count of blocks used by this packet */
3878 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3879}
3880
/* Detect a stalled ACL link: if the controller reports no free buffers
 * (@cnt == 0) and nothing has completed since the last TX for longer
 * than HCI_ACL_TX_TIMEOUT, tear down stalled ACL connections.
 * Skipped entirely for unconfigured controllers.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891
/* Packet-based ACL scheduler: while controller ACL buffers remain,
 * repeatedly pick the best channel via hci_chan_sent() and drain up
 * to its quota, stopping early if the head packet's priority drops.
 * Afterwards, rebalance priorities if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;	/* budget at entry, for the recalc check */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted — promote starved channels. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3929
/* Block-based ACL scheduler (flow control counted in data blocks
 * rather than packets). Mirrors hci_sched_acl_pkt() but debits the
 * block budget by the number of blocks each packet occupies. On AMP
 * controllers the AMP link type is scheduled instead of ACL.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;	/* budget at entry, for the recalc check */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* NOTE(review): if the packet needs more blocks
			 * than remain, we return with the already-dequeued
			 * skb neither sent, requeued nor freed — looks
			 * like a leak/drop; confirm intended behavior.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was transmitted — promote starved channels. */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3983
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003984static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003985{
3986 BT_DBG("%s", hdev->name);
3987
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003988 /* No ACL link over BR/EDR controller */
3989 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3990 return;
3991
3992 /* No AMP link over AMP controller */
3993 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003994 return;
3995
3996 switch (hdev->flow_ctl_mode) {
3997 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3998 hci_sched_acl_pkt(hdev);
3999 break;
4000
4001 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4002 hci_sched_acl_blk(hdev);
4003 break;
4004 }
4005}
4006
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004008static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009{
4010 struct hci_conn *conn;
4011 struct sk_buff *skb;
4012 int quote;
4013
4014 BT_DBG("%s", hdev->name);
4015
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004016 if (!hci_conn_num(hdev, SCO_LINK))
4017 return;
4018
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4020 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4021 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004022 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023
4024 conn->sent++;
4025 if (conn->sent == ~0)
4026 conn->sent = 0;
4027 }
4028 }
4029}
4030
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004031static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004032{
4033 struct hci_conn *conn;
4034 struct sk_buff *skb;
4035 int quote;
4036
4037 BT_DBG("%s", hdev->name);
4038
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004039 if (!hci_conn_num(hdev, ESCO_LINK))
4040 return;
4041
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004042 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4043 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004044 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4045 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004046 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004047
4048 conn->sent++;
4049 if (conn->sent == ~0)
4050 conn->sent = 0;
4051 }
4052 }
4053}
4054
/* LE scheduler: drain queued LE packets through hci_chan_sent(),
 * using the dedicated LE buffer budget when the controller has one
 * (le_pkts != 0) and the shared ACL budget otherwise. Also checks for
 * a stalled LE link before sending, and rebalances priorities after.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Working budget: LE buffers if present, else shared ACL ones. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember entry value for the recalc check */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4105
/* TX work item: run every per-link-type scheduler (unless the device
 * is bound to a user channel, which bypasses the kernel schedulers),
 * then flush any raw packets queued by user space.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4126
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004127/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128
4129/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004130static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131{
4132 struct hci_acl_hdr *hdr = (void *) skb->data;
4133 struct hci_conn *conn;
4134 __u16 handle, flags;
4135
4136 skb_pull(skb, HCI_ACL_HDR_SIZE);
4137
4138 handle = __le16_to_cpu(hdr->handle);
4139 flags = hci_flags(handle);
4140 handle = hci_handle(handle);
4141
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004142 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004143 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144
4145 hdev->stat.acl_rx++;
4146
4147 hci_dev_lock(hdev);
4148 conn = hci_conn_hash_lookup_handle(hdev, handle);
4149 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004150
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004152 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004153
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004155 l2cap_recv_acldata(conn, skb, flags);
4156 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004158 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004159 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160 }
4161
4162 kfree_skb(skb);
4163}
4164
4165/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004166static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167{
4168 struct hci_sco_hdr *hdr = (void *) skb->data;
4169 struct hci_conn *conn;
4170 __u16 handle;
4171
4172 skb_pull(skb, HCI_SCO_HDR_SIZE);
4173
4174 handle = __le16_to_cpu(hdr->handle);
4175
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004176 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177
4178 hdev->stat.sco_rx++;
4179
4180 hci_dev_lock(hdev);
4181 conn = hci_conn_hash_lookup_handle(hdev, handle);
4182 hci_dev_unlock(hdev);
4183
4184 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004186 sco_recv_scodata(conn, skb);
4187 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004189 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004190 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004191 }
4192
4193 kfree_skb(skb);
4194}
4195
Johan Hedberg9238f362013-03-05 20:37:48 +02004196static bool hci_req_is_complete(struct hci_dev *hdev)
4197{
4198 struct sk_buff *skb;
4199
4200 skb = skb_peek(&hdev->cmd_q);
4201 if (!skb)
4202 return true;
4203
Johan Hedberg44d27132015-11-05 09:31:40 +02004204 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
Johan Hedberg9238f362013-03-05 20:37:48 +02004205}
4206
/* Re-queue the most recently sent HCI command at the head of cmd_q.
 *
 * Used when a controller spontaneously resets and the in-flight
 * command would otherwise never complete. HCI_Reset itself is never
 * resent. Silently does nothing when there is no sent command or the
 * clone allocation fails.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Head of the queue so it goes out before anything else. */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4228
/* Resolve the completion callback(s) for the request that the command
 * with @opcode belongs to, once that command has completed with @status.
 *
 * On return, *req_complete or *req_complete_skb is set to the callback
 * stored either in hdev->sent_cmd (last command of a request) or in a
 * queued command of the same request; otherwise they are left untouched.
 * Commands of a finished request are drained from hdev->cmd_q.
 *
 * NOTE(review): hdev->sent_cmd is dereferenced below without a NULL
 * check; this relies on hci_sent_cmd_data() returning NULL (and hence
 * the early return above) when sent_cmd is NULL — confirm that helper's
 * behavior if modifying this path.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the next request and put
		 * it back at the head of the queue.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Last matching callback in the drained run wins */
		*req_complete = bt_cb(skb)->hci.req_complete;
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4288
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004289static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004291 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004292 struct sk_buff *skb;
4293
4294 BT_DBG("%s", hdev->name);
4295
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004297 /* Send copy to monitor */
4298 hci_send_to_monitor(hdev, skb);
4299
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300 if (atomic_read(&hdev->promisc)) {
4301 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004302 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303 }
4304
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004305 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306 kfree_skb(skb);
4307 continue;
4308 }
4309
4310 if (test_bit(HCI_INIT, &hdev->flags)) {
4311 /* Don't process data packets in this states. */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01004312 switch (hci_skb_pkt_type(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313 case HCI_ACLDATA_PKT:
4314 case HCI_SCODATA_PKT:
4315 kfree_skb(skb);
4316 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004317 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318 }
4319
4320 /* Process frame */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01004321 switch (hci_skb_pkt_type(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004323 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324 hci_event_packet(hdev, skb);
4325 break;
4326
4327 case HCI_ACLDATA_PKT:
4328 BT_DBG("%s ACL data packet", hdev->name);
4329 hci_acldata_packet(hdev, skb);
4330 break;
4331
4332 case HCI_SCODATA_PKT:
4333 BT_DBG("%s SCO data packet", hdev->name);
4334 hci_scodata_packet(hdev, skb);
4335 break;
4336
4337 default:
4338 kfree_skb(skb);
4339 break;
4340 }
4341 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342}
4343
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004344static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004346 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347 struct sk_buff *skb;
4348
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004349 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4350 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004353 if (atomic_read(&hdev->cmd_cnt)) {
4354 skb = skb_dequeue(&hdev->cmd_q);
4355 if (!skb)
4356 return;
4357
Wei Yongjun7585b972009-02-25 18:29:52 +08004358 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004360 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004361 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004363 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004364 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004365 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004366 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004367 schedule_delayed_work(&hdev->cmd_timer,
4368 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369 } else {
4370 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004371 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004372 }
4373 }
4374}