blob: ee8ef12282639d09ac975c7ef0e09b644a719009 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
Heiner Kallweit6d5d2ee2016-01-08 19:28:58 +010043#include "leds.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020044
Marcel Holtmannb78752c2010-08-08 23:06:53 -040045static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020046static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020047static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070048
Linus Torvalds1da177e2005-04-16 15:20:36 -070049/* HCI device list */
50LIST_HEAD(hci_dev_list);
51DEFINE_RWLOCK(hci_dev_list_lock);
52
53/* HCI callback list */
54LIST_HEAD(hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +020055DEFINE_MUTEX(hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
Sasha Levin3df92b32012-05-27 22:36:56 +020057/* HCI ID Numbering */
58static DEFINE_IDA(hci_index_ida);
59
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070060/* ---- HCI debugfs entries ---- */
61
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070062static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64{
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
Prasanna Karthik74b93e92015-11-18 12:38:41 +000068 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070069 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72}
73
74static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos)
76{
77 struct hci_dev *hdev = file->private_data;
78 struct sk_buff *skb;
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070079 bool enable;
Andy Shevchenko3bf5e972018-05-29 16:33:48 +030080 int err;
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070081
82 if (!test_bit(HCI_UP, &hdev->flags))
83 return -ENETDOWN;
84
Andy Shevchenko3bf5e972018-05-29 16:33:48 +030085 err = kstrtobool_from_user(user_buf, count, &enable);
86 if (err)
87 return err;
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070088
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -070089 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070090 return -EALREADY;
91
Johan Hedbergb5044302015-11-10 09:44:55 +020092 hci_req_sync_lock(hdev);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070093 if (enable)
94 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
95 HCI_CMD_TIMEOUT);
96 else
97 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
98 HCI_CMD_TIMEOUT);
Johan Hedbergb5044302015-11-10 09:44:55 +020099 hci_req_sync_unlock(hdev);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700100
101 if (IS_ERR(skb))
102 return PTR_ERR(skb);
103
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700104 kfree_skb(skb);
105
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -0700106 hci_dev_change_flag(hdev, HCI_DUT_MODE);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700107
108 return count;
109}
110
/* File operations for the "dut_mode" debugfs entry. simple_open places
 * the hci_dev pointer (passed as debugfs data) into file->private_data,
 * which the read/write handlers above rely on.
 */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
117
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200118static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
119 size_t count, loff_t *ppos)
120{
121 struct hci_dev *hdev = file->private_data;
122 char buf[3];
123
Prasanna Karthik74b93e92015-11-18 12:38:41 +0000124 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200125 buf[1] = '\n';
126 buf[2] = '\0';
127 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
128}
129
/* debugfs write handler for "vendor_diag": accepts a boolean string and
 * enables or disables the driver's vendor-specific diagnostic mode via
 * the hdev->set_diag callback.
 *
 * Returns the consumed byte count on success or a negative errno.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	/* Serialize against other synchronous HCI requests while the
	 * vendor callback talks to the controller.
	 */
	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	/* Record the (now effective or deferred) diagnostic state. */
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
166
/* File operations for the "vendor_diag" debugfs entry; only registered
 * when the driver provides a set_diag callback (see
 * hci_debugfs_create_basic).
 */
static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};
173
/* Create the basic debugfs entries available for every controller:
 * "dut_mode" unconditionally, and "vendor_diag" only when the driver
 * implements the set_diag callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
183
Johan Hedberga1d01db2015-11-11 08:11:25 +0200184static int hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200186 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187
188 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200189 set_bit(HCI_RESET, &req->hdev->flags);
190 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200191 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192}
193
/* First-stage init for a BR/EDR (primary) controller: select
 * packet-based flow control and queue the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
207
/* First-stage init for an AMP controller: select block-based flow
 * control and queue the AMP-specific information reads.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
230
Johan Hedberga1d01db2015-11-11 08:11:25 +0200231static int amp_init2(struct hci_request *req)
Johan Hedberg0af801b2015-02-17 15:05:21 +0200232{
233 /* Read Local Supported Features. Not all AMP controllers
234 * support this so it's placed conditionally in the second
235 * stage init.
236 */
237 if (req->hdev->commands[14] & 0x20)
238 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200239
240 return 0;
Johan Hedberg0af801b2015-02-17 15:05:21 +0200241}
242
Johan Hedberga1d01db2015-11-11 08:11:25 +0200243static int hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200244{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200245 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200246
247 BT_DBG("%s %ld", hdev->name, opt);
248
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300249 /* Reset */
250 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200251 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300252
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200253 switch (hdev->dev_type) {
Marcel Holtmannca8bee52016-07-05 14:30:14 +0200254 case HCI_PRIMARY:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200255 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200256 break;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200257 case HCI_AMP:
Johan Hedberg0af801b2015-02-17 15:05:21 +0200258 amp_init1(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200259 break;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200260 default:
Marcel Holtmann2064ee32017-10-30 10:42:59 +0100261 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200262 break;
263 }
Johan Hedberga1d01db2015-11-11 08:11:25 +0200264
265 return 0;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200266}
267
/* Stage-two BR/EDR setup: queue the controller configuration reads,
 * clear any event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
299
Johan Hedberg42c6b122013-03-05 20:37:49 +0200300static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200301{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300302 struct hci_dev *hdev = req->hdev;
303
Johan Hedberg2177bab2013-03-05 20:37:43 +0200304 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200305 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200306
307 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200308 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200309
Marcel Holtmann747d3f02014-02-27 20:37:29 -0800310 /* Read LE Supported States */
311 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
312
Johan Hedbergc73eee92013-04-19 18:35:21 +0300313 /* LE-only controllers have LE implicitly enabled */
314 if (!lmp_bredr_capable(hdev))
Marcel Holtmanna1536da2015-03-13 02:11:01 -0700315 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200316}
317
/* Build and queue the HCI Set Event Mask command. The mask starts from
 * a BR/EDR-friendly default and individual event bits are then enabled
 * based on the controller's LMP features and supported commands.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
413
/* Stage-two controller initialization: dispatch AMP controllers to
 * amp_init2(), otherwise run the BR/EDR and/or LE setup and queue the
 * capability-dependent configuration commands. Always returns 0.
 */
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear the cached EIR data and
			 * write an all-zero EIR to the controller.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		/* Read the extended features page 1. */
		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
497
Johan Hedberg42c6b122013-03-05 20:37:49 +0200498static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200499{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200500 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200501 struct hci_cp_write_def_link_policy cp;
502 u16 link_policy = 0;
503
504 if (lmp_rswitch_capable(hdev))
505 link_policy |= HCI_LP_RSWITCH;
506 if (lmp_hold_capable(hdev))
507 link_policy |= HCI_LP_HOLD;
508 if (lmp_sniff_capable(hdev))
509 link_policy |= HCI_LP_SNIFF;
510 if (lmp_park_capable(hdev))
511 link_policy |= HCI_LP_PARK;
512
513 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200514 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200515}
516
Johan Hedberg42c6b122013-03-05 20:37:49 +0200517static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200518{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200519 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200520 struct hci_cp_write_le_host_supported cp;
521
Johan Hedbergc73eee92013-04-19 18:35:21 +0300522 /* LE-only devices do not support explicit enablement */
523 if (!lmp_bredr_capable(hdev))
524 return;
525
Johan Hedberg2177bab2013-03-05 20:37:43 +0200526 memset(&cp, 0, sizeof(cp));
527
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700528 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200529 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +0200530 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200531 }
532
533 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200534 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
535 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200536}
537
/* Build and queue the Set Event Mask Page 2 command, but only when at
 * least one bit deviates from the all-zero default (see the Broadcom
 * workaround note below).
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}
582
Johan Hedberga1d01db2015-11-11 08:11:25 +0200583static int hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200584{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200585 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300586 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200587
Marcel Holtmann0da71f12014-07-12 23:36:16 +0200588 hci_setup_event_mask(req);
589
Johan Hedberge81be902015-08-30 21:47:20 +0300590 if (hdev->commands[6] & 0x20 &&
591 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Marcel Holtmann48ce62c2015-01-12 09:21:26 -0800592 struct hci_cp_read_stored_link_key cp;
593
594 bacpy(&cp.bdaddr, BDADDR_ANY);
595 cp.read_all = 0x01;
596 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
597 }
598
Johan Hedberg2177bab2013-03-05 20:37:43 +0200599 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200600 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200601
Marcel Holtmann417287d2014-12-11 20:21:54 +0100602 if (hdev->commands[8] & 0x01)
603 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
604
605 /* Some older Broadcom based Bluetooth 1.2 controllers do not
606 * support the Read Page Scan Type command. Check support for
607 * this command in the bit mask of supported commands.
608 */
609 if (hdev->commands[13] & 0x01)
610 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
611
Andre Guedes9193c6e2014-07-01 18:10:09 -0300612 if (lmp_le_capable(hdev)) {
613 u8 events[8];
614
615 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +0200616
617 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
618 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -0300619
620 /* If controller supports the Connection Parameters Request
621 * Link Layer Procedure, enable the corresponding event.
622 */
623 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
624 events[0] |= 0x20; /* LE Remote Connection
625 * Parameter Request
626 */
627
Marcel Holtmanna9f60682014-12-20 16:28:39 +0100628 /* If the controller supports the Data Length Extension
629 * feature, enable the corresponding event.
630 */
631 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
632 events[0] |= 0x40; /* LE Data Length Change */
633
Marcel Holtmann4b71bba2014-12-05 16:20:12 +0100634 /* If the controller supports Extended Scanner Filter
635 * Policies, enable the correspondig event.
636 */
637 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
638 events[1] |= 0x04; /* LE Direct Advertising
639 * Report
640 */
641
Marcel Holtmann9756d332017-05-01 23:54:17 -0700642 /* If the controller supports Channel Selection Algorithm #2
643 * feature, enable the corresponding event.
644 */
645 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
646 events[2] |= 0x08; /* LE Channel Selection
647 * Algorithm
648 */
649
Marcel Holtmann7d26f5c2015-11-01 09:39:51 +0100650 /* If the controller supports the LE Set Scan Enable command,
651 * enable the corresponding advertising report event.
652 */
653 if (hdev->commands[26] & 0x08)
654 events[0] |= 0x02; /* LE Advertising Report */
655
656 /* If the controller supports the LE Create Connection
657 * command, enable the corresponding event.
658 */
659 if (hdev->commands[26] & 0x10)
660 events[0] |= 0x01; /* LE Connection Complete */
661
662 /* If the controller supports the LE Connection Update
663 * command, enable the corresponding event.
664 */
665 if (hdev->commands[27] & 0x04)
666 events[0] |= 0x04; /* LE Connection Update
667 * Complete
668 */
669
670 /* If the controller supports the LE Read Remote Used Features
671 * command, enable the corresponding event.
672 */
673 if (hdev->commands[27] & 0x20)
674 events[0] |= 0x08; /* LE Read Remote Used
675 * Features Complete
676 */
677
Marcel Holtmann5a34bd52014-12-05 16:20:15 +0100678 /* If the controller supports the LE Read Local P-256
679 * Public Key command, enable the corresponding event.
680 */
681 if (hdev->commands[34] & 0x02)
682 events[0] |= 0x80; /* LE Read Local P-256
683 * Public Key Complete
684 */
685
686 /* If the controller supports the LE Generate DHKey
687 * command, enable the corresponding event.
688 */
689 if (hdev->commands[34] & 0x04)
690 events[1] |= 0x01; /* LE Generate DHKey Complete */
691
Marcel Holtmann27bbca42017-05-01 23:54:18 -0700692 /* If the controller supports the LE Set Default PHY or
693 * LE Set PHY commands, enable the corresponding event.
694 */
695 if (hdev->commands[35] & (0x20 | 0x40))
696 events[1] |= 0x08; /* LE PHY Update Complete */
697
Andre Guedes9193c6e2014-07-01 18:10:09 -0300698 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
699 events);
700
Marcel Holtmann15a49cc2014-07-12 23:20:50 +0200701 if (hdev->commands[25] & 0x40) {
702 /* Read LE Advertising Channel TX Power */
703 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
704 }
705
Marcel Holtmann2ab216a2015-11-01 09:39:48 +0100706 if (hdev->commands[26] & 0x40) {
707 /* Read LE White List Size */
708 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
709 0, NULL);
710 }
711
712 if (hdev->commands[26] & 0x80) {
713 /* Clear LE White List */
714 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
715 }
716
Marcel Holtmanna9f60682014-12-20 16:28:39 +0100717 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
718 /* Read LE Maximum Data Length */
719 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
720
721 /* Read LE Suggested Default Data Length */
722 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
723 }
724
Johan Hedberg42c6b122013-03-05 20:37:49 +0200725 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -0300726 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300727
728 /* Read features beyond page 1 if available */
729 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
730 struct hci_cp_read_local_ext_features cp;
731
732 cp.page = p;
733 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
734 sizeof(cp), &cp);
735 }
Johan Hedberga1d01db2015-11-11 08:11:25 +0200736
737 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200738}
739
/* Fourth and final stage of controller initialization: commands whose
 * applicability depends on the command/feature masks read in the
 * earlier stages (stored link keys, event mask page 2, codec list,
 * MWS transport configuration, synchronization train, secure
 * connections, LE data length and default PHY).
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Wipe all stored link keys, for every peer address */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = hdev->le_max_tx_len;
		cp.tx_time = hdev->le_max_tx_time;
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		/* No transmitter PHY or receiver PHY preferences */
		cp.all_phys = 0x03;
		cp.tx_phys = 0;
		cp.rx_phys = 0;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}
815
/* Run the staged initialization sequence for a configured controller.
 *
 * Stages 1 and 2 apply to all controllers; stages 3 and 4 are only
 * executed for HCI_PRIMARY devices. Debugfs entries are created in
 * the setup and config phases only. Returns 0 on success or the
 * negative error from the first failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* Basic debugfs entries are only created while still in setup */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
872
/* Stage-zero initialization used for unconfigured controllers: an
 * optional reset plus the minimum information commands needed to
 * identify the device.
 */
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless HCI_QUIRK_RESET_ON_CLOSE tells us to skip it */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address, but only when the driver can change it via
	 * its set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}
892
893static int __hci_unconf_init(struct hci_dev *hdev)
894{
895 int err;
896
Marcel Holtmanncc78b442014-07-06 13:43:20 +0200897 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
898 return 0;
899
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200900 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200901 if (err < 0)
902 return err;
903
Marcel Holtmannf640ee92015-10-08 12:35:42 +0200904 if (hci_dev_test_flag(hdev, HCI_SETUP))
905 hci_debugfs_create_basic(hdev);
906
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200907 return 0;
908}
909
Johan Hedberga1d01db2015-11-11 08:11:25 +0200910static int hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911{
912 __u8 scan = opt;
913
Johan Hedberg42c6b122013-03-05 20:37:49 +0200914 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915
916 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200917 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200918 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919}
920
Johan Hedberga1d01db2015-11-11 08:11:25 +0200921static int hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922{
923 __u8 auth = opt;
924
Johan Hedberg42c6b122013-03-05 20:37:49 +0200925 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926
927 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200928 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200929 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930}
931
Johan Hedberga1d01db2015-11-11 08:11:25 +0200932static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933{
934 __u8 encrypt = opt;
935
Johan Hedberg42c6b122013-03-05 20:37:49 +0200936 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200938 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200939 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200940 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700941}
942
Johan Hedberga1d01db2015-11-11 08:11:25 +0200943static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200944{
945 __le16 policy = cpu_to_le16(opt);
946
Johan Hedberg42c6b122013-03-05 20:37:49 +0200947 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200948
949 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200950 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Johan Hedberga1d01db2015-11-11 08:11:25 +0200951 return 0;
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200952}
953
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900954/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700955 * Device is held on return. */
956struct hci_dev *hci_dev_get(int index)
957{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200958 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959
960 BT_DBG("%d", index);
961
962 if (index < 0)
963 return NULL;
964
965 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200966 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967 if (d->id == index) {
968 hdev = hci_dev_hold(d);
969 break;
970 }
971 }
972 read_unlock(&hci_dev_list_lock);
973 return hdev;
974}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975
976/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200977
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200978bool hci_discovery_active(struct hci_dev *hdev)
979{
980 struct discovery_state *discov = &hdev->discovery;
981
Andre Guedes6fbe1952012-02-03 17:47:58 -0300982 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300983 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300984 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200985 return true;
986
Andre Guedes6fbe1952012-02-03 17:47:58 -0300987 default:
988 return false;
989 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200990}
991
/* Transition the discovery state machine to @state and, where
 * appropriate, notify the management interface about discovery
 * starting or stopping.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* Nothing to do when the state does not actually change */
	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Discovery no longer needs the radio, so re-evaluate
		 * background scanning.
		 */
		hci_update_background_scan(hdev);

		/* A STARTING -> STOPPED transition means discovery never
		 * actually began, so no "stopped" event is emitted.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1021
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001022void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023{
Johan Hedberg30883512012-01-04 14:16:21 +02001024 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001025 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026
Johan Hedberg561aafb2012-01-04 13:31:59 +02001027 list_for_each_entry_safe(p, n, &cache->all, all) {
1028 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001029 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001031
1032 INIT_LIST_HEAD(&cache->unknown);
1033 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034}
1035
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001036struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1037 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038{
Johan Hedberg30883512012-01-04 14:16:21 +02001039 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001040 struct inquiry_entry *e;
1041
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001042 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043
Johan Hedberg561aafb2012-01-04 13:31:59 +02001044 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001046 return e;
1047 }
1048
1049 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050}
1051
Johan Hedberg561aafb2012-01-04 13:31:59 +02001052struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001053 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001054{
Johan Hedberg30883512012-01-04 14:16:21 +02001055 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001056 struct inquiry_entry *e;
1057
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001058 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001059
1060 list_for_each_entry(e, &cache->unknown, list) {
1061 if (!bacmp(&e->data.bdaddr, bdaddr))
1062 return e;
1063 }
1064
1065 return NULL;
1066}
1067
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001068struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001069 bdaddr_t *bdaddr,
1070 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001071{
1072 struct discovery_state *cache = &hdev->discovery;
1073 struct inquiry_entry *e;
1074
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001075 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001076
1077 list_for_each_entry(e, &cache->resolve, list) {
1078 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1079 return e;
1080 if (!bacmp(&e->data.bdaddr, bdaddr))
1081 return e;
1082 }
1083
1084 return NULL;
1085}
1086
Johan Hedberga3d4e202012-01-09 00:53:02 +02001087void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001088 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001089{
1090 struct discovery_state *cache = &hdev->discovery;
1091 struct list_head *pos = &cache->resolve;
1092 struct inquiry_entry *p;
1093
1094 list_del(&ie->list);
1095
1096 list_for_each_entry(p, &cache->resolve, list) {
1097 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001098 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001099 break;
1100 pos = &p->list;
1101 }
1102
1103 list_add(&ie->list, pos);
1104}
1105
/* Add or refresh the inquiry cache entry for the device described by
 * @data.
 *
 * Returns MGMT_DEV_FOUND_* flags for reporting the result:
 * LEGACY_PAIRING when neither the new nor the cached data indicates
 * SSP support, and CONFIRM_NAME when the remote name is still not
 * known (or a new entry could not be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Fresh inquiry data invalidates stored OOB data for this
	 * BR/EDR address.
	 */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change while the name is still needed affects
		 * the entry's position in the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry once the name becomes known, unless a name
	 * request is already pending for it.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	/* Refresh the stored data and both timestamps */
	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1167
1168static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1169{
Johan Hedberg30883512012-01-04 14:16:21 +02001170 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 struct inquiry_info *info = (struct inquiry_info *) buf;
1172 struct inquiry_entry *e;
1173 int copied = 0;
1174
Johan Hedberg561aafb2012-01-04 13:31:59 +02001175 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001177
1178 if (copied >= num)
1179 break;
1180
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 bacpy(&info->bdaddr, &data->bdaddr);
1182 info->pscan_rep_mode = data->pscan_rep_mode;
1183 info->pscan_period_mode = data->pscan_period_mode;
1184 info->pscan_mode = data->pscan_mode;
1185 memcpy(info->dev_class, data->dev_class, 3);
1186 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001187
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001189 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 }
1191
1192 BT_DBG("cache %p, copied %d", cache, copied);
1193 return copied;
1194}
1195
Johan Hedberga1d01db2015-11-11 08:11:25 +02001196static int hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197{
1198 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001199 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 struct hci_cp_inquiry cp;
1201
1202 BT_DBG("%s", hdev->name);
1203
1204 if (test_bit(HCI_INQUIRY, &hdev->flags))
Johan Hedberga1d01db2015-11-11 08:11:25 +02001205 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
1207 /* Start Inquiry */
1208 memcpy(&cp.lap, &ir->lap, 3);
1209 cp.length = ir->length;
1210 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001211 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001212
1213 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214}
1215
1216int hci_inquiry(void __user *arg)
1217{
1218 __u8 __user *ptr = arg;
1219 struct hci_inquiry_req ir;
1220 struct hci_dev *hdev;
1221 int err = 0, do_inquiry = 0, max_rsp;
1222 long timeo;
1223 __u8 *buf;
1224
1225 if (copy_from_user(&ir, ptr, sizeof(ir)))
1226 return -EFAULT;
1227
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001228 hdev = hci_dev_get(ir.dev_id);
1229 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 return -ENODEV;
1231
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001232 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001233 err = -EBUSY;
1234 goto done;
1235 }
1236
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001237 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001238 err = -EOPNOTSUPP;
1239 goto done;
1240 }
1241
Marcel Holtmannca8bee52016-07-05 14:30:14 +02001242 if (hdev->dev_type != HCI_PRIMARY) {
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001243 err = -EOPNOTSUPP;
1244 goto done;
1245 }
1246
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001247 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001248 err = -EOPNOTSUPP;
1249 goto done;
1250 }
1251
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001252 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001253 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001254 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001255 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 do_inquiry = 1;
1257 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001258 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259
Marcel Holtmann04837f62006-07-03 10:02:33 +02001260 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001261
1262 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001263 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
Johan Hedberg4ebeee22015-11-11 08:11:19 +02001264 timeo, NULL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001265 if (err < 0)
1266 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001267
1268 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1269 * cleared). If it is interrupted by a signal, return -EINTR.
1270 */
NeilBrown74316202014-07-07 15:16:04 +10001271 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001272 TASK_INTERRUPTIBLE))
1273 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001274 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001276 /* for unlimited number of responses we will use buffer with
1277 * 255 entries
1278 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1280
1281 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1282 * copy it to the user space.
1283 */
Kees Cook6da2ec52018-06-12 13:55:00 -07001284 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001285 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 err = -ENOMEM;
1287 goto done;
1288 }
1289
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001290 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001292 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293
1294 BT_DBG("num_rsp %d", ir.num_rsp);
1295
1296 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1297 ptr += sizeof(ir);
1298 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001299 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001301 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 err = -EFAULT;
1303
1304 kfree(buf);
1305
1306done:
1307 hci_dev_put(hdev);
1308 return err;
1309}
1310
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001311static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 int ret = 0;
1314
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 BT_DBG("%s %p", hdev->name, hdev);
1316
Johan Hedbergb5044302015-11-10 09:44:55 +02001317 hci_req_sync_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001319 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001320 ret = -ENODEV;
1321 goto done;
1322 }
1323
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001324 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1325 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001326 /* Check for rfkill but allow the HCI setup stage to
1327 * proceed (which in itself doesn't cause any RF activity).
1328 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001329 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001330 ret = -ERFKILL;
1331 goto done;
1332 }
1333
1334 /* Check for valid public address or a configured static
1335 * random adddress, but let the HCI setup proceed to
1336 * be able to determine if there is a public address
1337 * or not.
1338 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001339 * In case of user channel usage, it is not important
1340 * if a public address or static random address is
1341 * available.
1342 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001343 * This check is only valid for BR/EDR controllers
1344 * since AMP controllers do not have an address.
1345 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001346 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannca8bee52016-07-05 14:30:14 +02001347 hdev->dev_type == HCI_PRIMARY &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001348 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1349 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1350 ret = -EADDRNOTAVAIL;
1351 goto done;
1352 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001353 }
1354
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355 if (test_bit(HCI_UP, &hdev->flags)) {
1356 ret = -EALREADY;
1357 goto done;
1358 }
1359
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 if (hdev->open(hdev)) {
1361 ret = -EIO;
1362 goto done;
1363 }
1364
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001365 set_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001366 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001367
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001368 atomic_set(&hdev->cmd_cnt, 1);
1369 set_bit(HCI_INIT, &hdev->flags);
1370
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001371 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmanne131d742015-10-20 02:30:47 +02001372 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1373
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001374 if (hdev->setup)
1375 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001376
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001377 /* The transport driver can set these quirks before
1378 * creating the HCI device or in its setup callback.
1379 *
1380 * In case any of them is set, the controller has to
1381 * start up as unconfigured.
1382 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001383 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1384 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001385 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001386
1387 /* For an unconfigured controller it is required to
1388 * read at least the version information provided by
1389 * the Read Local Version Information command.
1390 *
1391 * If the set_bdaddr driver callback is provided, then
1392 * also the original Bluetooth public device address
1393 * will be read using the Read BD Address command.
1394 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001395 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001396 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001397 }
1398
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001399 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001400 /* If public address change is configured, ensure that
1401 * the address gets programmed. If the driver does not
1402 * support changing the public address, fail the power
1403 * on procedure.
1404 */
1405 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1406 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001407 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1408 else
1409 ret = -EADDRNOTAVAIL;
1410 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001411
1412 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001413 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001414 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001415 ret = __hci_init(hdev);
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001416 if (!ret && hdev->post_init)
1417 ret = hdev->post_init(hdev);
1418 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 }
1420
Marcel Holtmann7e995b92015-10-17 16:00:26 +02001421 /* If the HCI Reset command is clearing all diagnostic settings,
1422 * then they need to be reprogrammed after the init procedure
1423 * completed.
1424 */
1425 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
Marcel Holtmannb56c7b22017-05-02 12:43:31 -07001426 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmann7e995b92015-10-17 16:00:26 +02001427 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1428 ret = hdev->set_diag(hdev, true);
1429
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001430 clear_bit(HCI_INIT, &hdev->flags);
1431
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 if (!ret) {
1433 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001434 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 set_bit(HCI_UP, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001436 hci_sock_dev_event(hdev, HCI_DEV_UP);
Heiner Kallweit6d5d2ee2016-01-08 19:28:58 +01001437 hci_leds_update_powered(hdev, true);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001438 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1439 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1440 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1441 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Johan Hedberg2ff13892015-11-25 16:15:44 +02001442 hci_dev_test_flag(hdev, HCI_MGMT) &&
Marcel Holtmannca8bee52016-07-05 14:30:14 +02001443 hdev->dev_type == HCI_PRIMARY) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02001444 ret = __hci_req_hci_power_on(hdev);
1445 mgmt_power_on(hdev, ret);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001446 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001447 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001449 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001450 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001451 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452
1453 skb_queue_purge(&hdev->cmd_q);
1454 skb_queue_purge(&hdev->rx_q);
1455
1456 if (hdev->flush)
1457 hdev->flush(hdev);
1458
1459 if (hdev->sent_cmd) {
1460 kfree_skb(hdev->sent_cmd);
1461 hdev->sent_cmd = NULL;
1462 }
1463
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001464 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001465 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001466
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001468 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 }
1470
1471done:
Johan Hedbergb5044302015-11-10 09:44:55 +02001472 hci_req_sync_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 return ret;
1474}
1475
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001476/* ---- HCI ioctl helpers ---- */
1477
/* Power on an HCI device on behalf of a legacy ioctl caller.
 *
 * Looks up the device by index, performs the preparation steps that
 * are specific to the non-mgmt (ioctl) interface and then hands over
 * to hci_dev_do_open() for the actual bring-up.
 *
 * Returns 0 on success or a negative error code (-ENODEV if the index
 * does not resolve, -EOPNOTSUPP for unconfigured non-user-channel
 * devices, or whatever hci_dev_do_open() reports).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	/* Drop the reference taken by hci_dev_get() on every exit path */
	hci_dev_put(hdev);
	return err;
}
1532
Johan Hedbergd7347f32014-07-04 12:37:23 +03001533/* This function requires the caller holds hdev->lock */
1534static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1535{
1536 struct hci_conn_params *p;
1537
Johan Hedbergf161dd42014-08-15 21:06:54 +03001538 list_for_each_entry(p, &hdev->le_conn_params, list) {
1539 if (p->conn) {
1540 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001541 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001542 p->conn = NULL;
1543 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001544 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001545 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001546
1547 BT_DBG("All LE pending actions cleared");
1548}
1549
/* Bring a powered-on HCI device fully down.
 *
 * Tears down pending work, flushes queues, notifies mgmt/monitor
 * listeners and finally calls the driver close() callback. The exact
 * ordering of the steps below matters: work items are cancelled or
 * flushed before their backing state is destroyed, and hdev->lock is
 * only held around the cache/connection cleanup.
 *
 * Always returns 0.
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	/* If the device was already down, only the command timer needs
	 * to be stopped; nothing else was running.
	 */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	/* Only notify mgmt about the power-off when userspace initiated
	 * it (i.e. not auto-off) on a primary controller it manages.
	 */
	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues (cmd_q is purged again because the reset request
	 * above may have queued new commands)
	 */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1669
1670int hci_dev_close(__u16 dev)
1671{
1672 struct hci_dev *hdev;
1673 int err;
1674
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001675 hdev = hci_dev_get(dev);
1676 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001678
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001679 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001680 err = -EBUSY;
1681 goto done;
1682 }
1683
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001684 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001685 cancel_delayed_work(&hdev->power_off);
1686
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001688
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001689done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 hci_dev_put(hdev);
1691 return err;
1692}
1693
/* Issue an HCI Reset to a device that is already up.
 *
 * Flushes all pending traffic and cached state before sending the
 * reset request synchronously. Called from hci_dev_reset() after the
 * HCI_UP / user-channel / unconfigured checks have passed.
 *
 * Returns the result of the synchronous reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control bookkeeping to its initial state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1727
Marcel Holtmann5c912492015-01-28 11:53:05 -08001728int hci_dev_reset(__u16 dev)
1729{
1730 struct hci_dev *hdev;
1731 int err;
1732
1733 hdev = hci_dev_get(dev);
1734 if (!hdev)
1735 return -ENODEV;
1736
1737 if (!test_bit(HCI_UP, &hdev->flags)) {
1738 err = -ENETDOWN;
1739 goto done;
1740 }
1741
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001742 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001743 err = -EBUSY;
1744 goto done;
1745 }
1746
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001747 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001748 err = -EOPNOTSUPP;
1749 goto done;
1750 }
1751
1752 err = hci_dev_do_reset(hdev);
1753
1754done:
1755 hci_dev_put(hdev);
1756 return err;
1757}
1758
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759int hci_dev_reset_stat(__u16 dev)
1760{
1761 struct hci_dev *hdev;
1762 int ret = 0;
1763
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001764 hdev = hci_dev_get(dev);
1765 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 return -ENODEV;
1767
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001768 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001769 ret = -EBUSY;
1770 goto done;
1771 }
1772
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001773 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001774 ret = -EOPNOTSUPP;
1775 goto done;
1776 }
1777
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1779
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001780done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 return ret;
1783}
1784
Johan Hedberg123abc02014-07-10 12:09:07 +03001785static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1786{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001787 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001788
1789 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1790
1791 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001792 conn_changed = !hci_dev_test_and_set_flag(hdev,
1793 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001794 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001795 conn_changed = hci_dev_test_and_clear_flag(hdev,
1796 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001797
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001798 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001799 discov_changed = !hci_dev_test_and_set_flag(hdev,
1800 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001801 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001802 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001803 discov_changed = hci_dev_test_and_clear_flag(hdev,
1804 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001805 }
1806
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001807 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001808 return;
1809
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001810 if (conn_changed || discov_changed) {
1811 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001812 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001813
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001814 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02001815 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001816
Johan Hedberg123abc02014-07-10 12:09:07 +03001817 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001818 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001819}
1820
/* Dispatch a legacy HCI device-configuration ioctl (HCISET*).
 *
 * Copies the request from userspace, validates the target device
 * (not user-channel owned, configured, a primary BR/EDR-enabled
 * controller) and then applies the requested setting. Some settings
 * are pushed to the controller via synchronous HCI requests, others
 * only update host-side fields of hdev.
 *
 * Returns 0 on success or a negative error code.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* These settings only make sense for primary controllers */
	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt carries two packed __u16 values: the second
		 * half is the MTU, the first half the packet count.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed layout as HCISETACLMTU */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1922
1923int hci_get_dev_list(void __user *arg)
1924{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001925 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 struct hci_dev_list_req *dl;
1927 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 int n = 0, size, err;
1929 __u16 dev_num;
1930
1931 if (get_user(dev_num, (__u16 __user *) arg))
1932 return -EFAULT;
1933
1934 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1935 return -EINVAL;
1936
1937 size = sizeof(*dl) + dev_num * sizeof(*dr);
1938
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001939 dl = kzalloc(size, GFP_KERNEL);
1940 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 return -ENOMEM;
1942
1943 dr = dl->dev_req;
1944
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001945 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001946 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001947 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001948
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001949 /* When the auto-off is configured it means the transport
1950 * is running, but in that case still indicate that the
1951 * device is actually down.
1952 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001953 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001954 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02001955
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001957 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001958
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 if (++n >= dev_num)
1960 break;
1961 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001962 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
1964 dl->dev_num = n;
1965 size = sizeof(*dl) + n * sizeof(*dr);
1966
1967 err = copy_to_user(arg, dl, size);
1968 kfree(dl);
1969
1970 return err ? -EFAULT : 0;
1971}
1972
1973int hci_get_dev_info(void __user *arg)
1974{
1975 struct hci_dev *hdev;
1976 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001977 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 int err = 0;
1979
1980 if (copy_from_user(&di, arg, sizeof(di)))
1981 return -EFAULT;
1982
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001983 hdev = hci_dev_get(di.dev_id);
1984 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 return -ENODEV;
1986
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001987 /* When the auto-off is configured it means the transport
1988 * is running, but in that case still indicate that the
1989 * device is actually down.
1990 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001991 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001992 flags = hdev->flags & ~BIT(HCI_UP);
1993 else
1994 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001995
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 strcpy(di.name, hdev->name);
1997 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001998 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001999 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002001 if (lmp_bredr_capable(hdev)) {
2002 di.acl_mtu = hdev->acl_mtu;
2003 di.acl_pkts = hdev->acl_pkts;
2004 di.sco_mtu = hdev->sco_mtu;
2005 di.sco_pkts = hdev->sco_pkts;
2006 } else {
2007 di.acl_mtu = hdev->le_mtu;
2008 di.acl_pkts = hdev->le_pkts;
2009 di.sco_mtu = 0;
2010 di.sco_pkts = 0;
2011 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 di.link_policy = hdev->link_policy;
2013 di.link_mode = hdev->link_mode;
2014
2015 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2016 memcpy(&di.features, &hdev->features, sizeof(di.features));
2017
2018 if (copy_to_user(arg, &di, sizeof(di)))
2019 err = -EFAULT;
2020
2021 hci_dev_put(hdev);
2022
2023 return err;
2024}
2025
2026/* ---- Interface to HCI drivers ---- */
2027
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002028static int hci_rfkill_set_block(void *data, bool blocked)
2029{
2030 struct hci_dev *hdev = data;
2031
2032 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2033
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002034 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002035 return -EBUSY;
2036
Johan Hedberg5e130362013-09-13 08:58:17 +03002037 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002038 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002039 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2040 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002041 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002042 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002043 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002044 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002045
2046 return 0;
2047}
2048
/* rfkill operations for the Bluetooth controller kill switch. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2052
/* Work item that powers on a controller.
 *
 * The fast path handles a device that is already up with mgmt enabled
 * and a pending auto-off: the auto-off is cancelled and only the
 * power-on HCI sequence is re-run. Otherwise the device is fully
 * opened, and a number of error conditions that are deliberately
 * ignored during setup (rfkill, unconfigured, missing address) are
 * re-checked, potentially turning the device back off.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		/* Nobody asked for this device to stay up; power it
		 * back off after the auto-off grace period.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2124
/* Delayed work item that powers a controller back off by closing it
 * (queued e.g. from hci_power_on() via the auto-off timeout).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2134
/* Work item run after a controller hardware error. Reports the error
 * through the driver's hw_error hook when one is provided (otherwise
 * just logs it), then restarts the controller with a close/open cycle.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	/* If the close fails, skip the re-open. */
	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2151
Johan Hedberg35f74982014-02-18 17:14:32 +02002152void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002153{
Johan Hedberg48210022013-01-27 00:31:28 +02002154 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002155
Johan Hedberg48210022013-01-27 00:31:28 +02002156 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2157 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002158 kfree(uuid);
2159 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002160}
2161
/* Delete every stored BR/EDR link key.
 *
 * Entries are unlinked with list_del_rcu() and released via
 * kfree_rcu() so concurrent RCU readers of hdev->link_keys (see
 * hci_find_link_key()) never touch freed memory.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
2171
/* Delete every stored LE Long Term Key; RCU-safe removal so readers
 * walking hdev->long_term_keys are not disturbed.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2181
/* Delete every stored Identity Resolving Key; RCU-safe removal so
 * readers walking hdev->identity_resolving_keys are not disturbed.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2191
/* Look up the stored BR/EDR link key for @bdaddr.
 * Walks hdev->link_keys under the RCU read lock; returns the matching
 * entry or NULL when no key is stored.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2207
/* Decide whether a new BR/EDR link key should be stored persistently.
 *
 * Returns true when the key should survive beyond the current
 * connection, false when it must be treated as session-only. @conn may
 * be NULL (security mode 3 case), in which case the key is kept.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2247
Johan Hedberge804d252014-07-16 11:42:28 +03002248static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002249{
Johan Hedberge804d252014-07-16 11:42:28 +03002250 if (type == SMP_LTK)
2251 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002252
Johan Hedberge804d252014-07-16 11:42:28 +03002253 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002254}
2255
/* Find a stored LTK for the given address, address type and role.
 *
 * Secure Connections LTKs match regardless of role (the check
 * short-circuits on smp_ltk_is_sc()); legacy LTKs match only when the
 * role derived from the key type equals @role. Traversal happens under
 * the RCU read lock.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002275
/* Resolve a Resolvable Private Address to a stored IRK.
 *
 * Two passes under one RCU read-side section: first a cheap match
 * against the RPA last cached in each entry, then a cryptographic
 * resolution of the RPA against every IRK value via smp_irk_matches().
 * On a second-pass hit the RPA is cached in the entry so the next
 * lookup takes the fast path. Returns NULL when nothing resolves.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2299
/* Find the IRK stored for a given identity address and address type.
 * Random identity addresses must be static random (top two bits set),
 * otherwise the lookup is rejected up front. Returns NULL when no IRK
 * is stored for the address.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2321
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL. When @persistent is non-NULL it is set to the
 * verdict of hci_persistent_key(), telling the caller whether the key
 * should be kept beyond this session. Returns the stored entry, or
 * NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* No previous key: remember 0xff as "no old type". */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the old key's type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2368
/* Store (or update) an LE Long Term Key.
 *
 * An existing key matching address, address type and the role implied
 * by @type is overwritten; otherwise a new entry is allocated and
 * added to the RCU-managed list. Returns the entry, or NULL on
 * allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
2397
/* Store (or refresh) the Identity Resolving Key for an identity
 * address. The 16-byte IRK value and the currently associated RPA are
 * (re)written on every call; a new list entry is only allocated when
 * none exists yet. Returns the entry, or NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
2420
/* Delete the stored link key for @bdaddr.
 * Returns 0 on success or -ENOENT when no key is stored. Uses
 * list_del_rcu()/kfree_rcu() so concurrent RCU readers stay safe.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}
2436
/* Delete all LTKs stored for the given address/address type.
 * Returns 0 when at least one entry was removed, -ENOENT otherwise.
 * Removal is RCU-safe (list_del_rcu()/kfree_rcu()).
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2455
/* Delete any IRK stored for the given identity address/address type.
 * Silent no-op when nothing matches; removal is RCU-safe.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2470
/* Report whether a bond exists with the given address.
 *
 * BR/EDR addresses are checked against the link-key list. For LE, the
 * address is first mapped through a matching IRK (if any) to its
 * identity address, which is then looked up in the LTK list under the
 * RCU read lock.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Resolve to the identity address before the LTK lookup. */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2506
/* HCI command timer function: fires when the controller failed to
 * answer an HCI command in time. Logs the stuck opcode (when the sent
 * command is still around), then resets the command credit to 1 and
 * kicks the command work so the queue can make progress again.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2525
Szymon Janc2763eda2011-03-22 13:12:22 +01002526struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002527 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002528{
2529 struct oob_data *data;
2530
Johan Hedberg6928a922014-10-26 20:46:09 +01002531 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2532 if (bacmp(bdaddr, &data->bdaddr) != 0)
2533 continue;
2534 if (data->bdaddr_type != bdaddr_type)
2535 continue;
2536 return data;
2537 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002538
2539 return NULL;
2540}
2541
Johan Hedberg6928a922014-10-26 20:46:09 +01002542int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2543 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002544{
2545 struct oob_data *data;
2546
Johan Hedberg6928a922014-10-26 20:46:09 +01002547 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002548 if (!data)
2549 return -ENOENT;
2550
Johan Hedberg6928a922014-10-26 20:46:09 +01002551 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002552
2553 list_del(&data->list);
2554 kfree(data);
2555
2556 return 0;
2557}
2558
Johan Hedberg35f74982014-02-18 17:14:32 +02002559void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002560{
2561 struct oob_data *data, *n;
2562
2563 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2564 list_del(&data->list);
2565 kfree(data);
2566 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002567}
2568
/* Store remote OOB pairing data (P-192 and/or P-256 hash+randomizer)
 * for an address, reusing an existing entry when present.
 *
 * data->present encodes which sets are valid:
 *   0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both, 0x00 = none.
 * A set is only stored when both its hash and randomizer are given;
 * the unused set is zeroed. Returns 0, or -ENOMEM on allocation
 * failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		/* Only P-192 data present. */
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2614
Florian Grandeld2609b32015-06-18 03:16:34 +02002615/* This function requires the caller holds hdev->lock */
2616struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2617{
2618 struct adv_info *adv_instance;
2619
2620 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2621 if (adv_instance->instance == instance)
2622 return adv_instance;
2623 }
2624
2625 return NULL;
2626}
2627
2628/* This function requires the caller holds hdev->lock */
Prasanna Karthik74b93e92015-11-18 12:38:41 +00002629struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2630{
Florian Grandeld2609b32015-06-18 03:16:34 +02002631 struct adv_info *cur_instance;
2632
2633 cur_instance = hci_find_adv_instance(hdev, instance);
2634 if (!cur_instance)
2635 return NULL;
2636
2637 if (cur_instance == list_last_entry(&hdev->adv_instances,
2638 struct adv_info, list))
2639 return list_first_entry(&hdev->adv_instances,
2640 struct adv_info, list);
2641 else
2642 return list_next_entry(cur_instance, list);
2643}
2644
2645/* This function requires the caller holds hdev->lock */
2646int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2647{
2648 struct adv_info *adv_instance;
2649
2650 adv_instance = hci_find_adv_instance(hdev, instance);
2651 if (!adv_instance)
2652 return -ENOENT;
2653
2654 BT_DBG("%s removing %dMR", hdev->name, instance);
2655
Johan Hedbergcab054a2015-11-30 11:21:45 +02002656 if (hdev->cur_adv_instance == instance) {
2657 if (hdev->adv_instance_timeout) {
2658 cancel_delayed_work(&hdev->adv_instance_expire);
2659 hdev->adv_instance_timeout = 0;
2660 }
2661 hdev->cur_adv_instance = 0x00;
Florian Grandel5d900e42015-06-18 03:16:35 +02002662 }
2663
Florian Grandeld2609b32015-06-18 03:16:34 +02002664 list_del(&adv_instance->list);
2665 kfree(adv_instance);
2666
2667 hdev->adv_instance_cnt--;
2668
2669 return 0;
2670}
2671
2672/* This function requires the caller holds hdev->lock */
2673void hci_adv_instances_clear(struct hci_dev *hdev)
2674{
2675 struct adv_info *adv_instance, *n;
2676
Florian Grandel5d900e42015-06-18 03:16:35 +02002677 if (hdev->adv_instance_timeout) {
2678 cancel_delayed_work(&hdev->adv_instance_expire);
2679 hdev->adv_instance_timeout = 0;
2680 }
2681
Florian Grandeld2609b32015-06-18 03:16:34 +02002682 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2683 list_del(&adv_instance->list);
2684 kfree(adv_instance);
2685 }
2686
2687 hdev->adv_instance_cnt = 0;
Johan Hedbergcab054a2015-11-30 11:21:45 +02002688 hdev->cur_adv_instance = 0x00;
Florian Grandeld2609b32015-06-18 03:16:34 +02002689}
2690
2691/* This function requires the caller holds hdev->lock */
2692int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2693 u16 adv_data_len, u8 *adv_data,
2694 u16 scan_rsp_len, u8 *scan_rsp_data,
2695 u16 timeout, u16 duration)
2696{
2697 struct adv_info *adv_instance;
2698
2699 adv_instance = hci_find_adv_instance(hdev, instance);
2700 if (adv_instance) {
2701 memset(adv_instance->adv_data, 0,
2702 sizeof(adv_instance->adv_data));
2703 memset(adv_instance->scan_rsp_data, 0,
2704 sizeof(adv_instance->scan_rsp_data));
2705 } else {
2706 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2707 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2708 return -EOVERFLOW;
2709
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002710 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002711 if (!adv_instance)
2712 return -ENOMEM;
2713
Florian Grandelfffd38b2015-06-18 03:16:47 +02002714 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002715 adv_instance->instance = instance;
2716 list_add(&adv_instance->list, &hdev->adv_instances);
2717 hdev->adv_instance_cnt++;
2718 }
2719
2720 adv_instance->flags = flags;
2721 adv_instance->adv_data_len = adv_data_len;
2722 adv_instance->scan_rsp_len = scan_rsp_len;
2723
2724 if (adv_data_len)
2725 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2726
2727 if (scan_rsp_len)
2728 memcpy(adv_instance->scan_rsp_data,
2729 scan_rsp_data, scan_rsp_len);
2730
2731 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002732 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002733
2734 if (duration == 0)
2735 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2736 else
2737 adv_instance->duration = duration;
2738
2739 BT_DBG("%s for %dMR", hdev->name, instance);
2740
2741 return 0;
2742}
2743
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002744struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002745 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002746{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002747 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002748
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002749 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002750 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002751 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002752 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002753
2754 return NULL;
2755}
2756
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002757void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002758{
Geliang Tang7eb74042015-12-18 23:33:25 +08002759 struct bdaddr_list *b, *n;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002760
Geliang Tang7eb74042015-12-18 23:33:25 +08002761 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2762 list_del(&b->list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002763 kfree(b);
2764 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002765}
2766
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002767int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002768{
2769 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002770
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002771 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002772 return -EBADF;
2773
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002774 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002775 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002776
Johan Hedberg27f70f32014-07-21 10:50:06 +03002777 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002778 if (!entry)
2779 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002780
2781 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002782 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002783
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002784 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002785
2786 return 0;
2787}
2788
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002789int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002790{
2791 struct bdaddr_list *entry;
2792
Johan Hedberg35f74982014-02-18 17:14:32 +02002793 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002794 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002795 return 0;
2796 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002797
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002798 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002799 if (!entry)
2800 return -ENOENT;
2801
2802 list_del(&entry->list);
2803 kfree(entry);
2804
2805 return 0;
2806}
2807
Andre Guedes15819a72014-02-03 13:56:18 -03002808/* This function requires the caller holds hdev->lock */
2809struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2810 bdaddr_t *addr, u8 addr_type)
2811{
2812 struct hci_conn_params *params;
2813
2814 list_for_each_entry(params, &hdev->le_conn_params, list) {
2815 if (bacmp(&params->addr, addr) == 0 &&
2816 params->addr_type == addr_type) {
2817 return params;
2818 }
2819 }
2820
2821 return NULL;
2822}
2823
2824/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002825struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2826 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002827{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002828 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002829
Johan Hedberg501f8822014-07-04 12:37:26 +03002830 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002831 if (bacmp(&param->addr, addr) == 0 &&
2832 param->addr_type == addr_type)
2833 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002834 }
2835
2836 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002837}
2838
2839/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002840struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2841 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002842{
2843 struct hci_conn_params *params;
2844
2845 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002846 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002847 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002848
2849 params = kzalloc(sizeof(*params), GFP_KERNEL);
2850 if (!params) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002851 bt_dev_err(hdev, "out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002852 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002853 }
2854
2855 bacpy(&params->addr, addr);
2856 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002857
2858 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002859 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002860
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002861 params->conn_min_interval = hdev->le_conn_min_interval;
2862 params->conn_max_interval = hdev->le_conn_max_interval;
2863 params->conn_latency = hdev->le_conn_latency;
2864 params->supervision_timeout = hdev->le_supv_timeout;
2865 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2866
2867 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2868
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002869 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002870}
2871
Johan Hedbergf6c63242014-08-15 21:06:59 +03002872static void hci_conn_params_free(struct hci_conn_params *params)
2873{
2874 if (params->conn) {
2875 hci_conn_drop(params->conn);
2876 hci_conn_put(params->conn);
2877 }
2878
2879 list_del(&params->action);
2880 list_del(&params->list);
2881 kfree(params);
2882}
2883
Andre Guedes15819a72014-02-03 13:56:18 -03002884/* This function requires the caller holds hdev->lock */
2885void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2886{
2887 struct hci_conn_params *params;
2888
2889 params = hci_conn_params_lookup(hdev, addr, addr_type);
2890 if (!params)
2891 return;
2892
Johan Hedbergf6c63242014-08-15 21:06:59 +03002893 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002894
Johan Hedberg95305ba2014-07-04 12:37:21 +03002895 hci_update_background_scan(hdev);
2896
Andre Guedes15819a72014-02-03 13:56:18 -03002897 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2898}
2899
2900/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03002901void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002902{
2903 struct hci_conn_params *params, *tmp;
2904
2905 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03002906 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2907 continue;
Jakub Pawlowskif75113a2015-08-07 20:22:53 +02002908
2909 /* If trying to estabilish one time connection to disabled
2910 * device, leave the params, but mark them as just once.
2911 */
2912 if (params->explicit_connect) {
2913 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2914 continue;
2915 }
2916
Andre Guedes15819a72014-02-03 13:56:18 -03002917 list_del(&params->list);
2918 kfree(params);
2919 }
2920
Johan Hedberg55af49a82014-07-02 17:37:26 +03002921 BT_DBG("All LE disabled connection parameters were removed");
2922}
2923
2924/* This function requires the caller holds hdev->lock */
Johan Hedberg030e7f82015-11-10 09:44:53 +02002925static void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002926{
2927 struct hci_conn_params *params, *tmp;
2928
Johan Hedbergf6c63242014-08-15 21:06:59 +03002929 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2930 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002931
2932 BT_DBG("All LE connection parameters were removed");
2933}
2934
Johan Hedberga1f4c312014-02-27 14:05:41 +02002935/* Copy the Identity Address of the controller.
2936 *
2937 * If the controller has a public BD_ADDR, then by default use that one.
2938 * If this is a LE only controller without a public address, default to
2939 * the static random address.
2940 *
2941 * For debugging purposes it is possible to force controllers with a
2942 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002943 *
2944 * In case BR/EDR has been disabled on a dual-mode controller and
2945 * userspace has configured a static address, then that address
2946 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02002947 */
2948void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2949 u8 *bdaddr_type)
2950{
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002951 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002952 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002953 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002954 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02002955 bacpy(bdaddr, &hdev->static_addr);
2956 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2957 } else {
2958 bacpy(bdaddr, &hdev->bdaddr);
2959 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2960 }
2961}
2962
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* BR/EDR packet type and link policy defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults (advertising, scanning, connection, data length);
	 * values are in controller units as defined by the HCI spec.
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists (keys, filters, LE connection bookkeeping) */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	/* Deferred work for the RX/TX/command paths and power handling */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3050
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3058
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must supply the minimal transport callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	/* rfkill registration is best-effort: a failure leaves the
	 * device usable without rfkill support.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Kick off asynchronous power-on from the request workqueue */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3163
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	/* Remove from the global device list first so new lookups fail */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Release the index only after the last local use of it */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3230
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Notify HCI sockets (monitor) about the suspend */
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3238
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Notify HCI sockets (monitor) about the resume */
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3246
Marcel Holtmann75e05692014-11-02 08:15:38 +01003247/* Reset HCI device */
3248int hci_reset_dev(struct hci_dev *hdev)
3249{
3250 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3251 struct sk_buff *skb;
3252
3253 skb = bt_skb_alloc(3, GFP_ATOMIC);
3254 if (!skb)
3255 return -ENOMEM;
3256
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003257 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
Johannes Berg59ae1d12017-06-16 14:29:20 +02003258 skb_put_data(skb, hw_err, 3);
Marcel Holtmann75e05692014-11-02 08:15:38 +01003259
3260 /* Send Hardware Error to upper stack */
3261 return hci_recv_frame(hdev, skb);
3262}
3263EXPORT_SYMBOL(hci_reset_dev);
3264
Marcel Holtmann76bca882009-11-18 00:40:39 +01003265/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003266int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003267{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003268 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003269 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003270 kfree_skb(skb);
3271 return -ENXIO;
3272 }
3273
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003274 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3275 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3276 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
Marcel Holtmannfe806dc2015-10-08 03:14:28 +02003277 kfree_skb(skb);
3278 return -EINVAL;
3279 }
3280
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003281 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003282 bt_cb(skb)->incoming = 1;
3283
3284 /* Time stamp */
3285 __net_timestamp(skb);
3286
Marcel Holtmann76bca882009-11-18 00:40:39 +01003287 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003288 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003289
Marcel Holtmann76bca882009-11-18 00:40:39 +01003290 return 0;
3291}
3292EXPORT_SYMBOL(hci_recv_frame);
3293
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003294/* Receive diagnostic message from HCI drivers */
3295int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3296{
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003297 /* Mark as diagnostic packet */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003298 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003299
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003300 /* Time stamp */
3301 __net_timestamp(skb);
3302
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003303 skb_queue_tail(&hdev->rx_q, skb);
3304 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003305
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003306 return 0;
3307}
3308EXPORT_SYMBOL(hci_recv_diag);
3309
/* Record the controller's hardware information string.
 * Takes a printf-style format; any previously set string is freed.
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
3320
/* Record the controller's firmware information string.
 * Takes a printf-style format; any previously set string is freed.
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
3331
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332/* ---- Interface to upper protocols ---- */
3333
/* Register an upper-protocol callback structure.
 * Appends under hci_cb_list_lock so concurrent iteration stays safe.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3345
/* Unregister a previously registered upper-protocol callback structure. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3357
/* Deliver one outgoing frame to the driver, mirroring a copy to the
 * monitor channel and (in promiscuous mode) to HCI sockets first.
 * Consumes the skb in all cases.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop the frame if the transport is no longer running */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
	}
}
3390
/* Send HCI command.
 *
 * Builds the command skb, marks it as the start of a (single-command)
 * request and queues it on cmd_q for the command work item, which
 * enforces the controller's outstanding-command window.
 * Returns 0 on success or -ENOMEM if the skb could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415
/* Send an HCI command directly to the driver, bypassing the command
 * queue and without waiting for any completion event. Only permitted
 * for vendor-specific (OGF 0x3f) commands, since standard commands are
 * guaranteed a Command Status/Complete event and must go through
 * hci_send_cmd() or the sync helpers instead.
 * Returns 0, -EINVAL for non-vendor opcodes, or -ENOMEM.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
3446
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter payload of hdev->sent_cmd if its
 * opcode matches @opcode, otherwise NULL (also NULL when no command is
 * outstanding). The pointer aliases sent_cmd's data; it is only valid
 * while that skb is held.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3464
/* Send HCI command and wait for command complete event.
 *
 * Public wrapper around __hci_cmd_sync() that refuses to run while the
 * device is down and takes the request-sync lock so the command cannot
 * interleave with other synchronous request sequences.
 * Returns the event skb or an ERR_PTR (e.g. -ENETDOWN).
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
3483
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484/* Send ACL data */
3485static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3486{
3487 struct hci_acl_hdr *hdr;
3488 int len = skb->len;
3489
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003490 skb_push(skb, HCI_ACL_HDR_SIZE);
3491 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003492 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003493 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3494 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495}
3496
/* Attach ACL headers to @skb (and every fragment on its frag_list) and
 * append all of them to @queue. The first fragment keeps the caller's
 * flags (ACL_START); continuation fragments are rewritten to ACL_CONT.
 * On an unknown dev_type the skb is neither queued nor freed here.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb accounting to the head before detaching frag_list */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		/* AMP links address the logical channel, not the connection */
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuations carry ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3558
/* Queue ACL data on the channel's data_q and kick the TX work item,
 * which performs the actual scheduling and transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569
/* Send SCO data */
/* Prepend the SCO header (handle + length) and queue the packet on the
 * connection's data_q; transmission happens from the TX work item.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590
3591/* ---- HCI TX task (outgoing data) ---- */
3592
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the lowest number
 * of in-flight packets (fair scheduling), and compute its TX quota as
 * available-controller-buffers / eligible-connections (minimum 1).
 * *quote is set to 0 when no eligible connection exists.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Without a dedicated LE buffer pool, LE shares
			 * the ACL credits.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			bt_dev_err(hdev, "unknown link type %d", conn->type);
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3653
/* TX timeout handling: disconnect every connection of @type that still
 * has unacknowledged packets, assuming the link has stalled.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3674
/* Channel-level scheduler: among all channels of connections of @type
 * with queued data, select one whose head skb has the highest priority,
 * breaking ties by the owning connection's lowest in-flight count.
 * Computes *quote the same way as hci_low_sent(); returns NULL if no
 * channel is eligible.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * accounting at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited - stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE shares ACL credits when no LE buffer pool exists */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3756
/* Anti-starvation pass run after a scheduling round: for every channel
 * of @type that sent nothing this round (chan->sent == 0) but still has
 * queued data, promote its head skb to HCI_PRIO_MAX - 1 so it wins the
 * next hci_chan_sent() selection. Channels that did send have their
 * per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to transmit: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3806
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003807static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3808{
3809 /* Calculate count of blocks used by this packet */
3810 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3811}
3812
/* If the controller is configured, all TX credits are exhausted and no
 * ACL packet has been sent for HCI_ACL_TX_TIMEOUT, treat the ACL link
 * as stalled and tear down stuck connections.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823
/* Packet-based ACL scheduler: repeatedly pick the best channel via
 * hci_chan_sent() and drain up to its quota of packets, stopping a
 * channel early if a lower-priority skb reaches the queue head. Runs
 * the starvation recalculation if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted: promote starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3861
/* Block-based ACL scheduler (flow control counted in data blocks, used
 * by AMP controllers). Like hci_sched_acl_pkt() but charges each packet
 * by the number of blocks it occupies; a packet larger than the
 * remaining block budget aborts the whole round.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links, primary ones ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3915
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003916static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003917{
3918 BT_DBG("%s", hdev->name);
3919
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003920 /* No ACL link over BR/EDR controller */
Marcel Holtmannca8bee52016-07-05 14:30:14 +02003921 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003922 return;
3923
3924 /* No AMP link over AMP controller */
3925 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003926 return;
3927
3928 switch (hdev->flow_ctl_mode) {
3929 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3930 hci_sched_acl_pkt(hdev);
3931 break;
3932
3933 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3934 hci_sched_acl_blk(hdev);
3935 break;
3936 }
3937}
3938
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003940static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941{
3942 struct hci_conn *conn;
3943 struct sk_buff *skb;
3944 int quote;
3945
3946 BT_DBG("%s", hdev->name);
3947
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003948 if (!hci_conn_num(hdev, SCO_LINK))
3949 return;
3950
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3952 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3953 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003954 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955
3956 conn->sent++;
3957 if (conn->sent == ~0)
3958 conn->sent = 0;
3959 }
3960 }
3961}
3962
/* Drain eSCO packets; identical structure to hci_sched_sco() but for
 * ESCO_LINK connections (eSCO shares the SCO credit pool, sco_cnt).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3986
/* LE scheduler: like hci_sched_acl_pkt() but draws credits from the LE
 * pool when the controller has one (le_pkts), otherwise borrows from
 * the ACL pool; the consumed count is written back to whichever pool
 * was used. Also performs its own stall detection for LE links.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* No dedicated LE buffers: borrow ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining credits to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4037
/* TX work item: run all per-link-type schedulers (unless userspace has
 * exclusive access via HCI_USER_CHANNEL) and then flush any raw-queued
 * packets straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4058
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004059/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060
/* ACL data packet */
/* RX path for incoming ACL data: strip the ACL header, look up the
 * connection by handle and pass the payload up to L2CAP (which then
 * owns the skb). Packets for unknown handles are logged and freed.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs the 12-bit handle plus PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4096
/* SCO data packet */
/* RX path for incoming SCO data: strip the SCO header, look up the
 * connection by handle and hand the payload to the SCO layer (which
 * then owns the skb). Packets for unknown handles are logged and freed.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
4127
Johan Hedberg9238f362013-03-05 20:37:48 +02004128static bool hci_req_is_complete(struct hci_dev *hdev)
4129{
4130 struct sk_buff *skb;
4131
4132 skb = skb_peek(&hdev->cmd_q);
4133 if (!skb)
4134 return true;
4135
Johan Hedberg44d27132015-11-05 09:31:40 +02004136 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
Johan Hedberg9238f362013-03-05 20:37:48 +02004137}
4138
Johan Hedberg42c6b122013-03-05 20:37:49 +02004139static void hci_resend_last(struct hci_dev *hdev)
4140{
4141 struct hci_command_hdr *sent;
4142 struct sk_buff *skb;
4143 u16 opcode;
4144
4145 if (!hdev->sent_cmd)
4146 return;
4147
4148 sent = (void *) hdev->sent_cmd->data;
4149 opcode = __le16_to_cpu(sent->opcode);
4150 if (opcode == HCI_OP_RESET)
4151 return;
4152
4153 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4154 if (!skb)
4155 return;
4156
4157 skb_queue_head(&hdev->cmd_q, skb);
4158 queue_work(hdev->workqueue, &hdev->cmd_work);
4159}
4160
/* Match a completed HCI command (opcode/status from a command complete or
 * command status event) against the request machinery and, when the whole
 * request has finished, hand its completion callback back to the caller
 * through @req_complete or @req_complete_skb (only one of them is set).
 *
 * On failure (@status != 0) the remaining queued commands of the request
 * are discarded and the callback of whichever queued command carried one
 * is returned.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* NOTE(review): past this point hdev->sent_cmd is dereferenced
	 * without a NULL check — presumably hci_sent_cmd_data() above
	 * returns NULL when sent_cmd is NULL, making that safe; confirm
	 * against its definition.
	 */

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A command flagged HCI_REQ_START begins the *next*
		 * request; put it back and stop draining.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Later commands in the request overwrite the callback;
		 * the last one dequeued wins.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4222
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004223static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004225 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226 struct sk_buff *skb;
4227
4228 BT_DBG("%s", hdev->name);
4229
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004231 /* Send copy to monitor */
4232 hci_send_to_monitor(hdev, skb);
4233
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 if (atomic_read(&hdev->promisc)) {
4235 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004236 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237 }
4238
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004239 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004240 kfree_skb(skb);
4241 continue;
4242 }
4243
4244 if (test_bit(HCI_INIT, &hdev->flags)) {
4245 /* Don't process data packets in this states. */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01004246 switch (hci_skb_pkt_type(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 case HCI_ACLDATA_PKT:
4248 case HCI_SCODATA_PKT:
4249 kfree_skb(skb);
4250 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004251 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252 }
4253
4254 /* Process frame */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01004255 switch (hci_skb_pkt_type(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004257 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258 hci_event_packet(hdev, skb);
4259 break;
4260
4261 case HCI_ACLDATA_PKT:
4262 BT_DBG("%s ACL data packet", hdev->name);
4263 hci_acldata_packet(hdev, skb);
4264 break;
4265
4266 case HCI_SCODATA_PKT:
4267 BT_DBG("%s SCO data packet", hdev->name);
4268 hci_scodata_packet(hdev, skb);
4269 break;
4270
4271 default:
4272 kfree_skb(skb);
4273 break;
4274 }
4275 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276}
4277
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004278static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004280 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004281 struct sk_buff *skb;
4282
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004283 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4284 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004287 if (atomic_read(&hdev->cmd_cnt)) {
4288 skb = skb_dequeue(&hdev->cmd_q);
4289 if (!skb)
4290 return;
4291
Wei Yongjun7585b972009-02-25 18:29:52 +08004292 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004294 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004295 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004297 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004298 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004299 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004300 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004301 schedule_delayed_work(&hdev->cmd_timer,
4302 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303 } else {
4304 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004305 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306 }
4307 }
4308}