/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

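/* The vendor_diag attribute controls vendor-specific diagnostic
 * reporting through the driver's set_diag callback. When the setting is
 * not persistent and the transport is down, only the flag is updated
 * and the driver is programmed later at power on.
 */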
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

/* ---- HCI requests ---- */

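/* Completion callback for synchronous requests: records the result,
 * keeps a reference to the returned skb (if any) and wakes up the
 * thread sleeping on req_wait_q.
 */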
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

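/* Send a single HCI command and sleep until the requested event (or the
 * default command response) arrives, a signal is pending, or the
 * timeout expires. Returns the response skb on success and an ERR_PTR
 * otherwise; the caller is responsible for kfree_skb() on the result.
 */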
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

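/* Locked wrapper around __hci_req_sync(): serializes all requests on
 * req_lock and refuses to run when the device is not HCI_UP.
 */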
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

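/* Stage one of the init sequence: reset the controller (unless
 * HCI_QUIRK_RESET_ON_CLOSE is set) and issue the basic identification
 * commands appropriate for the device type (BR/EDR or AMP).
 */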
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

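/* Build the HCI event mask from the controller's feature bits: start
 * from a conservative default (or an LE-only default) and opt into
 * each event only when the corresponding LMP feature is supported.
 */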
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

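/* Stage two of the init sequence: BR/EDR and LE transport setup plus
 * the commands whose availability can already be derived from the
 * feature bits read during stage one.
 */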
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

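/* Stage three of the init sequence: event mask, stored link keys, page
 * scan parameters, the LE event mask and LE controller capabilities,
 * and any extended feature pages beyond page 1.
 */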
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

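/* Run the full four-stage init sequence. Stages three and four only
 * apply to BR/EDR/LE controllers; AMP controllers stop after stage
 * two. Debugfs entries are only created during the setup and config
 * phases.
 */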
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

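/* Discovery is considered active while the state machine is in the
 * FINDING or RESOLVING state; STOPPED, STARTING and STOPPING do not
 * count.
 */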
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

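/* Add or refresh an inquiry cache entry for a discovered device and
 * return MGMT_DEV_FOUND_* flags describing whether legacy pairing is
 * assumed and whether the name still needs confirmation.
 */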
Marcel Holtmannaf589252014-07-01 14:11:20 +02001200u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1201 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202{
Johan Hedberg30883512012-01-04 14:16:21 +02001203 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001204 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02001205 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001207 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208
Johan Hedberg6928a922014-10-26 20:46:09 +01001209 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
Szymon Janc2b2fec42012-11-20 11:38:54 +01001210
Marcel Holtmannaf589252014-07-01 14:11:20 +02001211 if (!data->ssp_mode)
1212 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001213
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001214 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001215 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02001216 if (!ie->data.ssp_mode)
1217 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001218
Johan Hedberga3d4e202012-01-09 00:53:02 +02001219 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001220 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001221 ie->data.rssi = data->rssi;
1222 hci_inquiry_cache_update_resolve(hdev, ie);
1223 }
1224
Johan Hedberg561aafb2012-01-04 13:31:59 +02001225 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001226 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001227
Johan Hedberg561aafb2012-01-04 13:31:59 +02001228 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03001229 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02001230 if (!ie) {
1231 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1232 goto done;
1233 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001234
1235 list_add(&ie->all, &cache->all);
1236
1237 if (name_known) {
1238 ie->name_state = NAME_KNOWN;
1239 } else {
1240 ie->name_state = NAME_NOT_KNOWN;
1241 list_add(&ie->list, &cache->unknown);
1242 }
1243
1244update:
1245 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001246 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001247 ie->name_state = NAME_KNOWN;
1248 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 }
1250
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001251 memcpy(&ie->data, data, sizeof(*data));
1252 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001254
1255 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02001256 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02001257
Marcel Holtmannaf589252014-07-01 14:11:20 +02001258done:
1259 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260}
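/* Worked example (editorial note, not part of the original file): an
 * inquiry result from a pre-SSP device whose name has not been resolved
 * yet comes back with both bits set,
 *
 *	flags == MGMT_DEV_FOUND_LEGACY_PAIRING | MGMT_DEV_FOUND_CONFIRM_NAME
 *
 * since !data->ssp_mode sets the first bit and the entry ending up in
 * NAME_NOT_KNOWN state sets the second one just before returning.
 */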
1261
1262static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1263{
Johan Hedberg30883512012-01-04 14:16:21 +02001264 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 struct inquiry_info *info = (struct inquiry_info *) buf;
1266 struct inquiry_entry *e;
1267 int copied = 0;
1268
Johan Hedberg561aafb2012-01-04 13:31:59 +02001269 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001271
1272 if (copied >= num)
1273 break;
1274
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 bacpy(&info->bdaddr, &data->bdaddr);
1276 info->pscan_rep_mode = data->pscan_rep_mode;
1277 info->pscan_period_mode = data->pscan_period_mode;
1278 info->pscan_mode = data->pscan_mode;
1279 memcpy(info->dev_class, data->dev_class, 3);
1280 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001281
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001283 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 }
1285
1286 BT_DBG("cache %p, copied %d", cache, copied);
1287 return copied;
1288}
1289
Johan Hedberg42c6b122013-03-05 20:37:49 +02001290static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291{
1292 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001293 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 struct hci_cp_inquiry cp;
1295
1296 BT_DBG("%s", hdev->name);
1297
1298 if (test_bit(HCI_INQUIRY, &hdev->flags))
1299 return;
1300
1301 /* Start Inquiry */
1302 memcpy(&cp.lap, &ir->lap, 3);
1303 cp.length = ir->length;
1304 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001305 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306}
1307
1308int hci_inquiry(void __user *arg)
1309{
1310 __u8 __user *ptr = arg;
1311 struct hci_inquiry_req ir;
1312 struct hci_dev *hdev;
1313 int err = 0, do_inquiry = 0, max_rsp;
1314 long timeo;
1315 __u8 *buf;
1316
1317 if (copy_from_user(&ir, ptr, sizeof(ir)))
1318 return -EFAULT;
1319
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001320 hdev = hci_dev_get(ir.dev_id);
1321 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 return -ENODEV;
1323
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001324 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001325 err = -EBUSY;
1326 goto done;
1327 }
1328
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001329 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001330 err = -EOPNOTSUPP;
1331 goto done;
1332 }
1333
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001334 if (hdev->dev_type != HCI_BREDR) {
1335 err = -EOPNOTSUPP;
1336 goto done;
1337 }
1338
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001339 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001340 err = -EOPNOTSUPP;
1341 goto done;
1342 }
1343
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001344 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001345 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001346 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001347 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 do_inquiry = 1;
1349 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001350 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351
Marcel Holtmann04837f62006-07-03 10:02:33 +02001352 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001353
1354 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001355 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1356 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001357 if (err < 0)
1358 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001359
1360 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1361 * cleared). If it is interrupted by a signal, return -EINTR.
1362 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			/* Don't leak the reference taken by hci_dev_get() */
			err = -EINTR;
			goto done;
		}
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001366 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001368	/* For an unlimited number of responses, use a buffer with
1369	 * 255 entries.
1370	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1372
1373	/* cache_dump can't sleep, so allocate a temporary buffer and then
1374	 * copy it to user space.
1375 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001376 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001377 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 err = -ENOMEM;
1379 goto done;
1380 }
1381
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001382 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001384 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385
1386 BT_DBG("num_rsp %d", ir.num_rsp);
1387
1388 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1389 ptr += sizeof(ir);
1390 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001391 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001393 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 err = -EFAULT;
1395
1396 kfree(buf);
1397
1398done:
1399 hci_dev_put(hdev);
1400 return err;
1401}
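/* Editorial sketch, not part of the original file: how userspace reaches
 * hci_inquiry() through the HCIINQUIRY ioctl. This assumes the BlueZ
 * definitions from <bluetooth/bluetooth.h> and <bluetooth/hci.h>
 * (struct hci_inquiry_req, IREQ_CACHE_FLUSH, HCIINQUIRY, inquiry_info,
 * the GIAC LAP 0x9e8b33); error handling is trimmed:
 *
 *	uint8_t buf[sizeof(struct hci_inquiry_req) +
 *		    255 * sizeof(inquiry_info)];
 *	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	memset(buf, 0, sizeof(buf));
 *	ir->dev_id  = 0;		  // hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;	  // forces hci_inquiry_cache_flush()
 *	ir->lap[0]  = 0x33;		  // GIAC 0x9e8b33, LSB first
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;		  // in units of 1.28 s
 *	ir->num_rsp = 0;		  // 0 -> capped at 255 above
 *
 *	if (ioctl(dd, HCIINQUIRY, buf) < 0)
 *		perror("HCIINQUIRY");
 *
 * On success the kernel writes the updated hci_inquiry_req back, followed
 * by ir->num_rsp inquiry_info records, exactly as the copy_to_user()
 * calls above do.
 */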
1402
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001403static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 int ret = 0;
1406
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 BT_DBG("%s %p", hdev->name, hdev);
1408
1409 hci_req_lock(hdev);
1410
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001411 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001412 ret = -ENODEV;
1413 goto done;
1414 }
1415
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001416 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1417 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001418 /* Check for rfkill but allow the HCI setup stage to
1419 * proceed (which in itself doesn't cause any RF activity).
1420 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001421 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001422 ret = -ERFKILL;
1423 goto done;
1424 }
1425
1426 /* Check for valid public address or a configured static
1427		 * random address, but let the HCI setup proceed to
1428 * be able to determine if there is a public address
1429 * or not.
1430 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001431 * In case of user channel usage, it is not important
1432 * if a public address or static random address is
1433 * available.
1434 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001435 * This check is only valid for BR/EDR controllers
1436 * since AMP controllers do not have an address.
1437 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001438 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001439 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001440 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1441 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1442 ret = -EADDRNOTAVAIL;
1443 goto done;
1444 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001445 }
1446
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 if (test_bit(HCI_UP, &hdev->flags)) {
1448 ret = -EALREADY;
1449 goto done;
1450 }
1451
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 if (hdev->open(hdev)) {
1453 ret = -EIO;
1454 goto done;
1455 }
1456
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001457 set_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001458 hci_notify(hdev, HCI_DEV_OPEN);
1459
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001460 atomic_set(&hdev->cmd_cnt, 1);
1461 set_bit(HCI_INIT, &hdev->flags);
1462
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001463 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmanne131d742015-10-20 02:30:47 +02001464 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1465
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001466 if (hdev->setup)
1467 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001468
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001469 /* The transport driver can set these quirks before
1470 * creating the HCI device or in its setup callback.
1471 *
1472 * In case any of them is set, the controller has to
1473 * start up as unconfigured.
1474 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001475 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1476 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001477 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001478
1479 /* For an unconfigured controller it is required to
1480 * read at least the version information provided by
1481 * the Read Local Version Information command.
1482 *
1483 * If the set_bdaddr driver callback is provided, then
1484 * also the original Bluetooth public device address
1485 * will be read using the Read BD Address command.
1486 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001487 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001488 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001489 }
1490
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001491 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001492 /* If public address change is configured, ensure that
1493 * the address gets programmed. If the driver does not
1494 * support changing the public address, fail the power
1495 * on procedure.
1496 */
1497 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1498 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001499 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1500 else
1501 ret = -EADDRNOTAVAIL;
1502 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001503
1504 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001505 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001506 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001507 ret = __hci_init(hdev);
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001508 if (!ret && hdev->post_init)
1509 ret = hdev->post_init(hdev);
1510 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 }
1512
Marcel Holtmann7e995b92015-10-17 16:00:26 +02001513 /* If the HCI Reset command is clearing all diagnostic settings,
1514 * then they need to be reprogrammed after the init procedure
1515	 * has completed.
1516 */
1517 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1518 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1519 ret = hdev->set_diag(hdev, true);
1520
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001521 clear_bit(HCI_INIT, &hdev->flags);
1522
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 if (!ret) {
1524 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001525 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 set_bit(HCI_UP, &hdev->flags);
1527 hci_notify(hdev, HCI_DEV_UP);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001528 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1529 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1530 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1531 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001532 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001533 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001534 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001535 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001536 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001537 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001539 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001540 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001541 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
1543 skb_queue_purge(&hdev->cmd_q);
1544 skb_queue_purge(&hdev->rx_q);
1545
1546 if (hdev->flush)
1547 hdev->flush(hdev);
1548
1549 if (hdev->sent_cmd) {
1550 kfree_skb(hdev->sent_cmd);
1551 hdev->sent_cmd = NULL;
1552 }
1553
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001554 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001555 hci_notify(hdev, HCI_DEV_CLOSE);
1556
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001558 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 }
1560
1561done:
1562 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 return ret;
1564}
1565
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001566/* ---- HCI ioctl helpers ---- */
1567
1568int hci_dev_open(__u16 dev)
1569{
1570 struct hci_dev *hdev;
1571 int err;
1572
1573 hdev = hci_dev_get(dev);
1574 if (!hdev)
1575 return -ENODEV;
1576
Marcel Holtmann4a964402014-07-02 19:10:33 +02001577 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001578 * up as user channel. Trying to bring them up as normal devices
1579 * will result into a failure. Only user channel operation is
1580 * possible.
1581 *
1582 * When this function is called for a user channel, the flag
1583 * HCI_USER_CHANNEL will be set first before attempting to
1584 * open the device.
1585 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001586 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1587 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001588 err = -EOPNOTSUPP;
1589 goto done;
1590 }
1591
Johan Hedberge1d08f42013-10-01 22:44:50 +03001592 /* We need to ensure that no other power on/off work is pending
1593 * before proceeding to call hci_dev_do_open. This is
1594 * particularly important if the setup procedure has not yet
1595 * completed.
1596 */
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001597 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Johan Hedberge1d08f42013-10-01 22:44:50 +03001598 cancel_delayed_work(&hdev->power_off);
1599
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001600 /* After this call it is guaranteed that the setup procedure
1601 * has finished. This means that error conditions like RFKILL
1602 * or no valid public or static random address apply.
1603 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001604 flush_workqueue(hdev->req_workqueue);
1605
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001606 /* For controllers not using the management interface and that
Johan Hedbergb6ae8452014-07-30 09:22:22 +03001607 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001608 * so that pairing works for them. Once the management interface
1609 * is in use this bit will be cleared again and userspace has
1610 * to explicitly enable it.
1611 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001612 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1613 !hci_dev_test_flag(hdev, HCI_MGMT))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001614 hci_dev_set_flag(hdev, HCI_BONDABLE);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001615
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001616 err = hci_dev_do_open(hdev);
1617
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001618done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001619 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001620 return err;
1621}
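/* Editorial sketch, not part of the original file: the legacy path into
 * hci_dev_open() is the HCIDEVUP ioctl on a raw HCI control socket
 * (BlueZ definitions assumed; requires CAP_NET_ADMIN):
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("hci0 up");	// ends up in hci_dev_open(0)
 *
 * Because no management interface is involved, the HCI_BONDABLE handling
 * above kicks in and the controller comes up bondable by default.
 */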
1622
Johan Hedbergd7347f32014-07-04 12:37:23 +03001623/* This function requires the caller holds hdev->lock */
1624static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1625{
1626 struct hci_conn_params *p;
1627
Johan Hedbergf161dd42014-08-15 21:06:54 +03001628 list_for_each_entry(p, &hdev->le_conn_params, list) {
1629 if (p->conn) {
1630 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001631 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001632 p->conn = NULL;
1633 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001634 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001635 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001636
1637 BT_DBG("All LE pending actions cleared");
1638}
1639
Simon Fels6b3cc1d2015-09-02 12:10:12 +02001640int hci_dev_do_close(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641{
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001642 bool auto_off;
1643
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 BT_DBG("%s %p", hdev->name, hdev);
1645
Gabriele Mazzottad24d8142015-04-26 20:51:50 +02001646 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
Loic Poulain867146a2015-06-09 11:46:30 +02001647 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Gabriele Mazzottad24d8142015-04-26 20:51:50 +02001648 test_bit(HCI_UP, &hdev->flags)) {
Tedd Ho-Jeong Ana44fecb2015-02-13 09:20:50 -08001649 /* Execute vendor specific shutdown routine */
1650 if (hdev->shutdown)
1651 hdev->shutdown(hdev);
1652 }
1653
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001654 cancel_delayed_work(&hdev->power_off);
1655
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 hci_req_cancel(hdev, ENODEV);
1657 hci_req_lock(hdev);
1658
1659 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001660 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 hci_req_unlock(hdev);
1662 return 0;
1663 }
1664
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001665 /* Flush RX and TX works */
1666 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001667 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001669 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001670 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001671 hdev->discov_timeout = 0;
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001672 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1673 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001674 }
1675
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001676 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
Johan Hedberg7d785252011-12-15 00:47:39 +02001677 cancel_delayed_work(&hdev->service_cache);
1678
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001679 cancel_delayed_work_sync(&hdev->le_scan_disable);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08001680 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedberg4518bb02014-02-24 20:35:07 +02001681
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001682 if (hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg4518bb02014-02-24 20:35:07 +02001683 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001684
Florian Grandel5d900e42015-06-18 03:16:35 +02001685 if (hdev->adv_instance_timeout) {
1686 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1687 hdev->adv_instance_timeout = 0;
1688 }
1689
Johan Hedberg76727c02014-11-18 09:00:14 +02001690 /* Avoid potential lockdep warnings from the *_flush() calls by
1691 * ensuring the workqueue is empty up front.
1692 */
1693 drain_workqueue(hdev->workqueue);
1694
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001695 hci_dev_lock(hdev);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001696
Johan Hedberg8f502f82015-01-28 19:56:02 +02001697 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1698
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001699 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1700
1701 if (!auto_off && hdev->dev_type == HCI_BREDR)
1702 mgmt_powered(hdev, 0);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001703
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001704 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03001705 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001706 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001707 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
Marcel Holtmann64dae962015-01-28 14:10:28 -08001709 smp_unregister(hdev);
1710
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 hci_notify(hdev, HCI_DEV_DOWN);
1712
1713 if (hdev->flush)
1714 hdev->flush(hdev);
1715
1716 /* Reset device */
1717 skb_queue_purge(&hdev->cmd_q);
1718 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001719 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1720 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001722 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723 clear_bit(HCI_INIT, &hdev->flags);
1724 }
1725
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001726 /* flush cmd work */
1727 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728
1729 /* Drop queues */
1730 skb_queue_purge(&hdev->rx_q);
1731 skb_queue_purge(&hdev->cmd_q);
1732 skb_queue_purge(&hdev->raw_q);
1733
1734 /* Drop last sent command */
1735 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001736 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 kfree_skb(hdev->sent_cmd);
1738 hdev->sent_cmd = NULL;
1739 }
1740
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001741 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001742 hci_notify(hdev, HCI_DEV_CLOSE);
1743
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 /* After this point our queues are empty
1745 * and no tasks are scheduled. */
1746 hdev->close(hdev);
1747
Johan Hedberg35b973c2013-03-15 17:06:59 -05001748 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001749 hdev->flags &= BIT(HCI_RAW);
Marcel Holtmanneacb44d2015-03-13 09:04:17 -07001750 hci_dev_clear_volatile_flags(hdev);
Johan Hedberg35b973c2013-03-15 17:06:59 -05001751
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001752 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001753 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001754
Johan Hedberge59fda82012-02-22 18:11:53 +02001755 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001756 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001757 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02001758
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 hci_req_unlock(hdev);
1760
1761 hci_dev_put(hdev);
1762 return 0;
1763}
1764
1765int hci_dev_close(__u16 dev)
1766{
1767 struct hci_dev *hdev;
1768 int err;
1769
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001770 hdev = hci_dev_get(dev);
1771 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001773
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001774 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001775 err = -EBUSY;
1776 goto done;
1777 }
1778
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001779 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001780 cancel_delayed_work(&hdev->power_off);
1781
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001783
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001784done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 hci_dev_put(hdev);
1786 return err;
1787}
1788
Marcel Holtmann5c912492015-01-28 11:53:05 -08001789static int hci_dev_do_reset(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790{
Marcel Holtmann5c912492015-01-28 11:53:05 -08001791 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792
Marcel Holtmann5c912492015-01-28 11:53:05 -08001793 BT_DBG("%s %p", hdev->name, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794
1795 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 /* Drop queues */
1798 skb_queue_purge(&hdev->rx_q);
1799 skb_queue_purge(&hdev->cmd_q);
1800
Johan Hedberg76727c02014-11-18 09:00:14 +02001801 /* Avoid potential lockdep warnings from the *_flush() calls by
1802 * ensuring the workqueue is empty up front.
1803 */
1804 drain_workqueue(hdev->workqueue);
1805
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001806 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001807 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001809 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
1811 if (hdev->flush)
1812 hdev->flush(hdev);
1813
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001814 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001815 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001817 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 return ret;
1821}
1822
Marcel Holtmann5c912492015-01-28 11:53:05 -08001823int hci_dev_reset(__u16 dev)
1824{
1825 struct hci_dev *hdev;
1826 int err;
1827
1828 hdev = hci_dev_get(dev);
1829 if (!hdev)
1830 return -ENODEV;
1831
1832 if (!test_bit(HCI_UP, &hdev->flags)) {
1833 err = -ENETDOWN;
1834 goto done;
1835 }
1836
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001837 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001838 err = -EBUSY;
1839 goto done;
1840 }
1841
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001842 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001843 err = -EOPNOTSUPP;
1844 goto done;
1845 }
1846
1847 err = hci_dev_do_reset(hdev);
1848
1849done:
1850 hci_dev_put(hdev);
1851 return err;
1852}
1853
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854int hci_dev_reset_stat(__u16 dev)
1855{
1856 struct hci_dev *hdev;
1857 int ret = 0;
1858
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001859 hdev = hci_dev_get(dev);
1860 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861 return -ENODEV;
1862
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001863 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001864 ret = -EBUSY;
1865 goto done;
1866 }
1867
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001868 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001869 ret = -EOPNOTSUPP;
1870 goto done;
1871 }
1872
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1874
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001875done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 return ret;
1878}
1879
Johan Hedberg123abc02014-07-10 12:09:07 +03001880static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1881{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001882 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001883
1884 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1885
1886 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001887 conn_changed = !hci_dev_test_and_set_flag(hdev,
1888 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001889 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001890 conn_changed = hci_dev_test_and_clear_flag(hdev,
1891 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001892
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001893 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001894 discov_changed = !hci_dev_test_and_set_flag(hdev,
1895 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001896 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001897 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001898 discov_changed = hci_dev_test_and_clear_flag(hdev,
1899 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001900 }
1901
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001902 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001903 return;
1904
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001905 if (conn_changed || discov_changed) {
1906 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001907 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001908
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001909 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001910 mgmt_update_adv_data(hdev);
1911
Johan Hedberg123abc02014-07-10 12:09:07 +03001912 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001913 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001914}
1915
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916int hci_dev_cmd(unsigned int cmd, void __user *arg)
1917{
1918 struct hci_dev *hdev;
1919 struct hci_dev_req dr;
1920 int err = 0;
1921
1922 if (copy_from_user(&dr, arg, sizeof(dr)))
1923 return -EFAULT;
1924
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001925 hdev = hci_dev_get(dr.dev_id);
1926 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 return -ENODEV;
1928
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001929 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001930 err = -EBUSY;
1931 goto done;
1932 }
1933
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001934 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001935 err = -EOPNOTSUPP;
1936 goto done;
1937 }
1938
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001939 if (hdev->dev_type != HCI_BREDR) {
1940 err = -EOPNOTSUPP;
1941 goto done;
1942 }
1943
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001944 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001945 err = -EOPNOTSUPP;
1946 goto done;
1947 }
1948
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 switch (cmd) {
1950 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001951 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1952 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 break;
1954
1955 case HCISETENCRYPT:
1956 if (!lmp_encrypt_capable(hdev)) {
1957 err = -EOPNOTSUPP;
1958 break;
1959 }
1960
1961 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1962 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001963 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1964 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 if (err)
1966 break;
1967 }
1968
Johan Hedberg01178cd2013-03-05 20:37:41 +02001969 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1970 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 break;
1972
1973 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001974 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1975 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03001976
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001977 /* Ensure that the connectable and discoverable states
1978 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03001979 */
Johan Hedberg123abc02014-07-10 12:09:07 +03001980 if (!err)
1981 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 break;
1983
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001984 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001985 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1986 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001987 break;
1988
1989 case HCISETLINKMODE:
1990 hdev->link_mode = ((__u16) dr.dev_opt) &
1991 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1992 break;
1993
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 case HCISETPTYPE:
1995 hdev->pkt_type = (__u16) dr.dev_opt;
1996 break;
1997
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001999 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2000 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 break;
2002
2003 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002004 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2005 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 break;
2007
2008 default:
2009 err = -EINVAL;
2010 break;
2011 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002012
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002013done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 hci_dev_put(hdev);
2015 return err;
2016}
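/* Editorial sketch, not part of the original file: exercising the
 * HCISETSCAN branch of hci_dev_cmd() from userspace (BlueZ definitions
 * assumed; SCAN_PAGE makes the device connectable, SCAN_INQUIRY
 * discoverable):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 *
 * On success hci_update_scan_state() above mirrors dev_opt into the
 * HCI_CONNECTABLE/HCI_DISCOVERABLE flags so the mgmt view stays in sync.
 */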
2017
2018int hci_get_dev_list(void __user *arg)
2019{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002020 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 struct hci_dev_list_req *dl;
2022 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 int n = 0, size, err;
2024 __u16 dev_num;
2025
2026 if (get_user(dev_num, (__u16 __user *) arg))
2027 return -EFAULT;
2028
2029 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2030 return -EINVAL;
2031
2032 size = sizeof(*dl) + dev_num * sizeof(*dr);
2033
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002034 dl = kzalloc(size, GFP_KERNEL);
2035 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 return -ENOMEM;
2037
2038 dr = dl->dev_req;
2039
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002040 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002041 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002042 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002043
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002044		/* When auto-off is configured it means the transport
2045		 * is running, but in that case we still indicate that the
2046		 * device is actually down.
2047 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002048 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002049 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002050
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002052 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002053
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 if (++n >= dev_num)
2055 break;
2056 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002057 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058
2059 dl->dev_num = n;
2060 size = sizeof(*dl) + n * sizeof(*dr);
2061
2062 err = copy_to_user(arg, dl, size);
2063 kfree(dl);
2064
2065 return err ? -EFAULT : 0;
2066}
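/* Editorial sketch, not part of the original file: enumerating adapters
 * via HCIGETDEVLIST (BlueZ definitions assumed; error handling trimmed):
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u %s\n", dr[i].dev_id,
 *			       dr[i].dev_opt & (1UL << HCI_UP) ?
 *			       "up" : "down");
 *
 * Note that dev_opt carries hdev->flags here, with HCI_UP masked out for
 * auto-off devices as done above.
 */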
2067
2068int hci_get_dev_info(void __user *arg)
2069{
2070 struct hci_dev *hdev;
2071 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002072 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 int err = 0;
2074
2075 if (copy_from_user(&di, arg, sizeof(di)))
2076 return -EFAULT;
2077
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002078 hdev = hci_dev_get(di.dev_id);
2079 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 return -ENODEV;
2081
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002082	/* When auto-off is configured it means the transport
2083	 * is running, but in that case we still indicate that the
2084	 * device is actually down.
2085 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002086 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002087 flags = hdev->flags & ~BIT(HCI_UP);
2088 else
2089 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002090
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 strcpy(di.name, hdev->name);
2092 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002093 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002094 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002096 if (lmp_bredr_capable(hdev)) {
2097 di.acl_mtu = hdev->acl_mtu;
2098 di.acl_pkts = hdev->acl_pkts;
2099 di.sco_mtu = hdev->sco_mtu;
2100 di.sco_pkts = hdev->sco_pkts;
2101 } else {
2102 di.acl_mtu = hdev->le_mtu;
2103 di.acl_pkts = hdev->le_pkts;
2104 di.sco_mtu = 0;
2105 di.sco_pkts = 0;
2106 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 di.link_policy = hdev->link_policy;
2108 di.link_mode = hdev->link_mode;
2109
2110 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2111 memcpy(&di.features, &hdev->features, sizeof(di.features));
2112
2113 if (copy_to_user(arg, &di, sizeof(di)))
2114 err = -EFAULT;
2115
2116 hci_dev_put(hdev);
2117
2118 return err;
2119}
2120
2121/* ---- Interface to HCI drivers ---- */
2122
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002123static int hci_rfkill_set_block(void *data, bool blocked)
2124{
2125 struct hci_dev *hdev = data;
2126
2127 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2128
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002129 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002130 return -EBUSY;
2131
Johan Hedberg5e130362013-09-13 08:58:17 +03002132 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002133 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002134 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2135 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002136 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002137 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002138 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002139 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002140
2141 return 0;
2142}
2143
2144static const struct rfkill_ops hci_rfkill_ops = {
2145 .set_block = hci_rfkill_set_block,
2146};
2147
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002148static void hci_power_on(struct work_struct *work)
2149{
2150 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002151 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002152
2153 BT_DBG("%s", hdev->name);
2154
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002155 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002156 if (err < 0) {
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302157 hci_dev_lock(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002158 mgmt_set_powered_failed(hdev, err);
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302159 hci_dev_unlock(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002160 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002161 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002162
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002163 /* During the HCI setup phase, a few error conditions are
2164 * ignored and they need to be checked now. If they are still
2165 * valid, it is important to turn the device back off.
2166 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002167 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2168 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002169 (hdev->dev_type == HCI_BREDR &&
2170 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2171 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002172 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
Johan Hedbergbf543032013-09-13 08:58:18 +03002173 hci_dev_do_close(hdev);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002174 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002175 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2176 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002177 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002178
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002179 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002180 /* For unconfigured devices, set the HCI_RAW flag
2181 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002182 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002183 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann4a964402014-07-02 19:10:33 +02002184 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002185
2186 /* For fully configured devices, this will send
2187 * the Index Added event. For unconfigured devices,
2188		 * it will send an Unconfigured Index Added event.
2189 *
2190 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2191		 * and no event will be sent.
2192 */
Johan Hedberg744cf192011-11-08 20:40:14 +02002193 mgmt_index_added(hdev);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002194 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002195		/* Once the controller is configured, it
2196		 * is important to clear the HCI_RAW flag.
2197 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002198 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002199 clear_bit(HCI_RAW, &hdev->flags);
2200
Marcel Holtmannd603b76b2014-07-06 12:11:14 +02002201 /* Powering on the controller with HCI_CONFIG set only
2202 * happens with the transition from unconfigured to
2203 * configured. This will send the Index Added event.
2204 */
2205 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002206 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002207}
2208
2209static void hci_power_off(struct work_struct *work)
2210{
Johan Hedberg32435532011-11-07 22:16:04 +02002211 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002212 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002213
2214 BT_DBG("%s", hdev->name);
2215
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002216 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002217}
2218
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002219static void hci_error_reset(struct work_struct *work)
2220{
2221 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2222
2223 BT_DBG("%s", hdev->name);
2224
2225 if (hdev->hw_error)
2226 hdev->hw_error(hdev, hdev->hw_error_code);
2227 else
2228 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2229 hdev->hw_error_code);
2230
2231 if (hci_dev_do_close(hdev))
2232 return;
2233
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002234 hci_dev_do_open(hdev);
2235}
2236
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002237static void hci_discov_off(struct work_struct *work)
2238{
2239 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002240
2241 hdev = container_of(work, struct hci_dev, discov_off.work);
2242
2243 BT_DBG("%s", hdev->name);
2244
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002245 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002246}
2247
Florian Grandel5d900e42015-06-18 03:16:35 +02002248static void hci_adv_timeout_expire(struct work_struct *work)
2249{
2250 struct hci_dev *hdev;
2251
2252 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2253
2254 BT_DBG("%s", hdev->name);
2255
2256 mgmt_adv_timeout_expired(hdev);
2257}
2258
Johan Hedberg35f74982014-02-18 17:14:32 +02002259void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002260{
Johan Hedberg48210022013-01-27 00:31:28 +02002261 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002262
Johan Hedberg48210022013-01-27 00:31:28 +02002263 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2264 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002265 kfree(uuid);
2266 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002267}
2268
Johan Hedberg35f74982014-02-18 17:14:32 +02002269void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002270{
Johan Hedberg0378b592014-11-19 15:22:22 +02002271 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002272
Johan Hedberg0378b592014-11-19 15:22:22 +02002273 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2274 list_del_rcu(&key->list);
2275 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002276 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002277}
2278
Johan Hedberg35f74982014-02-18 17:14:32 +02002279void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002280{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002281 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002282
Johan Hedberg970d0f12014-11-13 14:37:47 +02002283 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2284 list_del_rcu(&k->list);
2285 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002286 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002287}
2288
Johan Hedberg970c4e42014-02-18 10:19:33 +02002289void hci_smp_irks_clear(struct hci_dev *hdev)
2290{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002291 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002292
Johan Hedbergadae20c2014-11-13 14:37:48 +02002293 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2294 list_del_rcu(&k->list);
2295 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002296 }
2297}
2298
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002299struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2300{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002301 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002302
Johan Hedberg0378b592014-11-19 15:22:22 +02002303 rcu_read_lock();
2304 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2305 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2306 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002307 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002308 }
2309 }
2310 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002311
2312 return NULL;
2313}
2314
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302315static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002316 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002317{
2318 /* Legacy key */
2319 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302320 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002321
2322 /* Debug keys are insecure so don't store them persistently */
2323 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302324 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002325
2326 /* Changed combination key and there's no previous one */
2327 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302328 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002329
2330 /* Security mode 3 case */
2331 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302332 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002333
Johan Hedberge3befab2014-06-01 16:33:39 +03002334 /* BR/EDR key derived using SC from an LE link */
2335 if (conn->type == LE_LINK)
2336 return true;
2337
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002338	/* Neither the local nor the remote side had no-bonding as a requirement */
2339 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302340 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002341
2342 /* Local side had dedicated bonding as requirement */
2343 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302344 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002345
2346 /* Remote side had dedicated bonding as requirement */
2347 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302348 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002349
2350 /* If none of the above criteria match, then don't store the key
2351 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302352 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002353}
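/* Editorial reading of the rules above, not part of the original file:
 * an SSP-generated unauthenticated combination key (type 0x04) is kept
 * only when both sides requested some form of bonding (auth values
 * above 0x01) or either side asked for dedicated bonding (0x02/0x03).
 * With auth_type and remote_auth both at no-bonding (0x00/0x01) every
 * check falls through and the key stays in RAM only, i.e.
 * hci_add_link_key() reports *persistent == false.
 */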
2354
Johan Hedberge804d252014-07-16 11:42:28 +03002355static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002356{
Johan Hedberge804d252014-07-16 11:42:28 +03002357 if (type == SMP_LTK)
2358 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002359
Johan Hedberge804d252014-07-16 11:42:28 +03002360 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002361}
2362
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002363struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2364 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002365{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002366 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002367
Johan Hedberg970d0f12014-11-13 14:37:47 +02002368 rcu_read_lock();
2369 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002370 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2371 continue;
2372
Johan Hedberg923e2412014-12-03 12:43:39 +02002373 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002374 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002375 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002376 }
2377 }
2378 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002379
2380 return NULL;
2381}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002382
Johan Hedberg970c4e42014-02-18 10:19:33 +02002383struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2384{
2385 struct smp_irk *irk;
2386
Johan Hedbergadae20c2014-11-13 14:37:48 +02002387 rcu_read_lock();
2388 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2389 if (!bacmp(&irk->rpa, rpa)) {
2390 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002391 return irk;
2392 }
2393 }
2394
Johan Hedbergadae20c2014-11-13 14:37:48 +02002395 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2396 if (smp_irk_matches(hdev, irk->val, rpa)) {
2397 bacpy(&irk->rpa, rpa);
2398 rcu_read_unlock();
2399 return irk;
2400 }
2401 }
2402 rcu_read_unlock();
2403
Johan Hedberg970c4e42014-02-18 10:19:33 +02002404 return NULL;
2405}
2406
2407struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2408 u8 addr_type)
2409{
2410 struct smp_irk *irk;
2411
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002412 /* Identity Address must be public or static random */
2413 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2414 return NULL;
2415
Johan Hedbergadae20c2014-11-13 14:37:48 +02002416 rcu_read_lock();
2417 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002418 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002419 bacmp(bdaddr, &irk->bdaddr) == 0) {
2420 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002421 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002422 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002423 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002424 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002425
2426 return NULL;
2427}
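/* Editorial note, not part of the original file: b[5] is the most
 * significant address byte in the little endian bdaddr_t layout, and
 * static random addresses must have its two top bits set to 0b11. The
 * 0xc0 check in hci_find_irk_by_addr() above therefore accepts random
 * identity addresses in the C0:xx:xx:xx:xx:xx..FF:xx:xx:xx:xx:xx range
 * and rejects resolvable (0b01) and non-resolvable (0b00) private ones.
 */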
2428
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002429struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002430 bdaddr_t *bdaddr, u8 *val, u8 type,
2431 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002432{
2433 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302434 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002435
2436 old_key = hci_find_link_key(hdev, bdaddr);
2437 if (old_key) {
2438 old_key_type = old_key->type;
2439 key = old_key;
2440 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002441 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002442 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002443 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002444 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02002445 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002446 }
2447
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002448 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002449
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002450 /* Some buggy controller combinations generate a changed
2451 * combination key for legacy pairing even when there's no
2452 * previous key */
2453 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002454 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002455 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002456 if (conn)
2457 conn->key_type = type;
2458 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002459
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002460 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002461 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002462 key->pin_len = pin_len;
2463
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002464 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002465 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002466 else
2467 key->type = type;
2468
Johan Hedberg7652ff62014-06-24 13:15:49 +03002469 if (persistent)
2470 *persistent = hci_persistent_key(hdev, conn, type,
2471 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002472
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002473 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002474}
2475
Johan Hedbergca9142b2014-02-19 14:57:44 +02002476struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002477 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002478 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002479{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002480 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002481 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002482
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002483 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002484 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002485 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002486 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002487 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002488 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002489 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002490 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002491 }
2492
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002493 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002494 key->bdaddr_type = addr_type;
2495 memcpy(key->val, tk, sizeof(key->val));
2496 key->authenticated = authenticated;
2497 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002498 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002499 key->enc_size = enc_size;
2500 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002501
Johan Hedbergca9142b2014-02-19 14:57:44 +02002502 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002503}
2504
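/* Add or update an Identity Resolving Key. If an IRK already exists for
 * this identity address, only the key value and the current RPA are
 * refreshed; otherwise a new entry is created.
 */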
Johan Hedbergca9142b2014-02-19 14:57:44 +02002505struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2506 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002507{
2508 struct smp_irk *irk;
2509
2510 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2511 if (!irk) {
2512 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2513 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002514 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002515
2516 bacpy(&irk->bdaddr, bdaddr);
2517 irk->addr_type = addr_type;
2518
Johan Hedbergadae20c2014-11-13 14:37:48 +02002519 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002520 }
2521
2522 memcpy(irk->val, val, 16);
2523 bacpy(&irk->rpa, rpa);
2524
Johan Hedbergca9142b2014-02-19 14:57:44 +02002525 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002526}
2527
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002528int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2529{
2530 struct link_key *key;
2531
2532 key = hci_find_link_key(hdev, bdaddr);
2533 if (!key)
2534 return -ENOENT;
2535
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002536 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002537
Johan Hedberg0378b592014-11-19 15:22:22 +02002538 list_del_rcu(&key->list);
2539 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002540
2541 return 0;
2542}
2543
Johan Hedberge0b2b272014-02-18 17:14:31 +02002544int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002545{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002546 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002547 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002548
Johan Hedberg970d0f12014-11-13 14:37:47 +02002549 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002550 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002551 continue;
2552
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002553 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002554
Johan Hedberg970d0f12014-11-13 14:37:47 +02002555 list_del_rcu(&k->list);
2556 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002557 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002558 }
2559
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002560 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002561}
2562
Johan Hedberga7ec7332014-02-18 17:14:35 +02002563void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2564{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002565 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002566
Johan Hedbergadae20c2014-11-13 14:37:48 +02002567 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002568 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2569 continue;
2570
2571 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2572
Johan Hedbergadae20c2014-11-13 14:37:48 +02002573 list_del_rcu(&k->list);
2574 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002575 }
2576}
2577
Johan Hedberg55e76b32015-03-10 22:34:40 +02002578bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2579{
2580 struct smp_ltk *k;
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002581 struct smp_irk *irk;
Johan Hedberg55e76b32015-03-10 22:34:40 +02002582 u8 addr_type;
2583
2584 if (type == BDADDR_BREDR) {
2585 if (hci_find_link_key(hdev, bdaddr))
2586 return true;
2587 return false;
2588 }
2589
 2590	/* Convert to the HCI address type that struct smp_ltk uses */
2591 if (type == BDADDR_LE_PUBLIC)
2592 addr_type = ADDR_LE_DEV_PUBLIC;
2593 else
2594 addr_type = ADDR_LE_DEV_RANDOM;
2595
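	/* The given address may be an RPA; LTKs are stored against the
	 * identity address, so resolve it through the IRK list first.
	 */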
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002596 irk = hci_get_irk(hdev, bdaddr, addr_type);
2597 if (irk) {
2598 bdaddr = &irk->bdaddr;
2599 addr_type = irk->addr_type;
2600 }
2601
Johan Hedberg55e76b32015-03-10 22:34:40 +02002602 rcu_read_lock();
2603 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg87c8b282015-03-11 08:55:51 +02002604 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2605 rcu_read_unlock();
Johan Hedberg55e76b32015-03-10 22:34:40 +02002606 return true;
Johan Hedberg87c8b282015-03-11 08:55:51 +02002607 }
Johan Hedberg55e76b32015-03-10 22:34:40 +02002608 }
2609 rcu_read_unlock();
2610
2611 return false;
2612}
2613
Ville Tervo6bd32322011-02-16 16:32:41 +02002614/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002615static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002616{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002617 struct hci_dev *hdev = container_of(work, struct hci_dev,
2618 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002619
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002620 if (hdev->sent_cmd) {
2621 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2622 u16 opcode = __le16_to_cpu(sent->opcode);
2623
2624 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2625 } else {
2626 BT_ERR("%s command tx timeout", hdev->name);
2627 }
2628
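	/* Reset the command credit count so hci_cmd_work can send the next
	 * queued command instead of waiting forever for a reply that never
	 * came.
	 */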
Ville Tervo6bd32322011-02-16 16:32:41 +02002629 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002630 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002631}
2632
Szymon Janc2763eda2011-03-22 13:12:22 +01002633struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002634 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002635{
2636 struct oob_data *data;
2637
Johan Hedberg6928a922014-10-26 20:46:09 +01002638 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2639 if (bacmp(bdaddr, &data->bdaddr) != 0)
2640 continue;
2641 if (data->bdaddr_type != bdaddr_type)
2642 continue;
2643 return data;
2644 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002645
2646 return NULL;
2647}
2648
Johan Hedberg6928a922014-10-26 20:46:09 +01002649int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2650 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002651{
2652 struct oob_data *data;
2653
Johan Hedberg6928a922014-10-26 20:46:09 +01002654 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002655 if (!data)
2656 return -ENOENT;
2657
Johan Hedberg6928a922014-10-26 20:46:09 +01002658 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002659
2660 list_del(&data->list);
2661 kfree(data);
2662
2663 return 0;
2664}
2665
Johan Hedberg35f74982014-02-18 17:14:32 +02002666void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002667{
2668 struct oob_data *data, *n;
2669
2670 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2671 list_del(&data->list);
2672 kfree(data);
2673 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002674}
2675
Marcel Holtmann07988722014-01-10 02:07:29 -08002676int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002677 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002678 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01002679{
2680 struct oob_data *data;
2681
Johan Hedberg6928a922014-10-26 20:46:09 +01002682 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002683 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002684 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002685 if (!data)
2686 return -ENOMEM;
2687
2688 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01002689 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01002690 list_add(&data->list, &hdev->remote_oob_data);
2691 }
2692
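	/* The "present" bitmask tracks which OOB values are valid:
	 * 0x01 = P-192 data only, 0x02 = P-256 data only, 0x03 = both.
	 */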
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002693 if (hash192 && rand192) {
2694 memcpy(data->hash192, hash192, sizeof(data->hash192));
2695 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002696 if (hash256 && rand256)
2697 data->present = 0x03;
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002698 } else {
2699 memset(data->hash192, 0, sizeof(data->hash192));
2700 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002701 if (hash256 && rand256)
2702 data->present = 0x02;
2703 else
2704 data->present = 0x00;
Marcel Holtmann07988722014-01-10 02:07:29 -08002705 }
2706
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002707 if (hash256 && rand256) {
2708 memcpy(data->hash256, hash256, sizeof(data->hash256));
2709 memcpy(data->rand256, rand256, sizeof(data->rand256));
2710 } else {
2711 memset(data->hash256, 0, sizeof(data->hash256));
2712 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002713 if (hash192 && rand192)
2714 data->present = 0x01;
Johan Hedberg81328d5c2014-10-26 20:33:47 +01002715 }
Marcel Holtmann07988722014-01-10 02:07:29 -08002716
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002717 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002718
2719 return 0;
2720}
2721
Florian Grandeld2609b32015-06-18 03:16:34 +02002722/* This function requires the caller holds hdev->lock */
2723struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2724{
2725 struct adv_info *adv_instance;
2726
2727 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2728 if (adv_instance->instance == instance)
2729 return adv_instance;
2730 }
2731
2732 return NULL;
2733}
2734
2735/* This function requires the caller holds hdev->lock */
 2736struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2737 struct adv_info *cur_instance;
2738
2739 cur_instance = hci_find_adv_instance(hdev, instance);
2740 if (!cur_instance)
2741 return NULL;
2742
2743 if (cur_instance == list_last_entry(&hdev->adv_instances,
2744 struct adv_info, list))
2745 return list_first_entry(&hdev->adv_instances,
2746 struct adv_info, list);
2747 else
2748 return list_next_entry(cur_instance, list);
2749}
2750
2751/* This function requires the caller holds hdev->lock */
2752int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2753{
2754 struct adv_info *adv_instance;
2755
2756 adv_instance = hci_find_adv_instance(hdev, instance);
2757 if (!adv_instance)
2758 return -ENOENT;
2759
2760 BT_DBG("%s removing %dMR", hdev->name, instance);
2761
Florian Grandel5d900e42015-06-18 03:16:35 +02002762 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2763 cancel_delayed_work(&hdev->adv_instance_expire);
2764 hdev->adv_instance_timeout = 0;
2765 }
2766
Florian Grandeld2609b32015-06-18 03:16:34 +02002767 list_del(&adv_instance->list);
2768 kfree(adv_instance);
2769
2770 hdev->adv_instance_cnt--;
2771
2772 return 0;
2773}
2774
2775/* This function requires the caller holds hdev->lock */
2776void hci_adv_instances_clear(struct hci_dev *hdev)
2777{
2778 struct adv_info *adv_instance, *n;
2779
Florian Grandel5d900e42015-06-18 03:16:35 +02002780 if (hdev->adv_instance_timeout) {
2781 cancel_delayed_work(&hdev->adv_instance_expire);
2782 hdev->adv_instance_timeout = 0;
2783 }
2784
Florian Grandeld2609b32015-06-18 03:16:34 +02002785 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2786 list_del(&adv_instance->list);
2787 kfree(adv_instance);
2788 }
2789
2790 hdev->adv_instance_cnt = 0;
2791}
2792
2793/* This function requires the caller holds hdev->lock */
2794int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2795 u16 adv_data_len, u8 *adv_data,
2796 u16 scan_rsp_len, u8 *scan_rsp_data,
2797 u16 timeout, u16 duration)
2798{
2799 struct adv_info *adv_instance;
2800
2801 adv_instance = hci_find_adv_instance(hdev, instance);
2802 if (adv_instance) {
2803 memset(adv_instance->adv_data, 0,
2804 sizeof(adv_instance->adv_data));
2805 memset(adv_instance->scan_rsp_data, 0,
2806 sizeof(adv_instance->scan_rsp_data));
2807 } else {
2808 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2809 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2810 return -EOVERFLOW;
2811
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002812 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002813 if (!adv_instance)
2814 return -ENOMEM;
2815
Florian Grandelfffd38b2015-06-18 03:16:47 +02002816 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002817 adv_instance->instance = instance;
2818 list_add(&adv_instance->list, &hdev->adv_instances);
2819 hdev->adv_instance_cnt++;
2820 }
2821
2822 adv_instance->flags = flags;
2823 adv_instance->adv_data_len = adv_data_len;
2824 adv_instance->scan_rsp_len = scan_rsp_len;
2825
2826 if (adv_data_len)
2827 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2828
2829 if (scan_rsp_len)
2830 memcpy(adv_instance->scan_rsp_data,
2831 scan_rsp_data, scan_rsp_len);
2832
2833 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002834 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002835
2836 if (duration == 0)
2837 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2838 else
2839 adv_instance->duration = duration;
2840
2841 BT_DBG("%s for %dMR", hdev->name, instance);
2842
2843 return 0;
2844}
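/* Illustrative sketch, not code from this file: an mgmt Add Advertising
 * handler would call this with data parsed from the command, roughly
 * ("cp", "flags", "timeout" and "duration" assumed from that context):
 *
 *	err = hci_add_adv_instance(hdev, cp->instance, flags,
 *				   cp->adv_data_len, cp->data,
 *				   cp->scan_rsp_len,
 *				   cp->data + cp->adv_data_len,
 *				   timeout, duration);
 */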
2845
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002846struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002847 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002848{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002849 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002850
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002851 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002852 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002853 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002854 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002855
2856 return NULL;
2857}
2858
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002859void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002860{
2861 struct list_head *p, *n;
2862
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002863 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002864 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002865
2866 list_del(p);
2867 kfree(b);
2868 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002869}
2870
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002871int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002872{
2873 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002874
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002875 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002876 return -EBADF;
2877
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002878 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002879 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002880
Johan Hedberg27f70f32014-07-21 10:50:06 +03002881 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002882 if (!entry)
2883 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002884
2885 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002886 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002887
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002888 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002889
2890 return 0;
2891}
2892
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002893int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002894{
2895 struct bdaddr_list *entry;
2896
Johan Hedberg35f74982014-02-18 17:14:32 +02002897 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002898 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002899 return 0;
2900 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002901
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002902 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002903 if (!entry)
2904 return -ENOENT;
2905
2906 list_del(&entry->list);
2907 kfree(entry);
2908
2909 return 0;
2910}
2911
Andre Guedes15819a72014-02-03 13:56:18 -03002912/* This function requires the caller holds hdev->lock */
2913struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2914 bdaddr_t *addr, u8 addr_type)
2915{
2916 struct hci_conn_params *params;
2917
2918 list_for_each_entry(params, &hdev->le_conn_params, list) {
2919 if (bacmp(&params->addr, addr) == 0 &&
2920 params->addr_type == addr_type) {
2921 return params;
2922 }
2923 }
2924
2925 return NULL;
2926}
2927
2928/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002929struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2930 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002931{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002932 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002933
Johan Hedberg501f8822014-07-04 12:37:26 +03002934 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002935 if (bacmp(&param->addr, addr) == 0 &&
2936 param->addr_type == addr_type)
2937 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002938 }
2939
2940 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002941}
2942
2943/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002944struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2945 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002946{
2947 struct hci_conn_params *params;
2948
2949 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002950 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002951 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002952
2953 params = kzalloc(sizeof(*params), GFP_KERNEL);
2954 if (!params) {
2955 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002956 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002957 }
2958
2959 bacpy(&params->addr, addr);
2960 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002961
2962 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002963 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002964
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002965 params->conn_min_interval = hdev->le_conn_min_interval;
2966 params->conn_max_interval = hdev->le_conn_max_interval;
2967 params->conn_latency = hdev->le_conn_latency;
2968 params->supervision_timeout = hdev->le_supv_timeout;
2969 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2970
2971 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2972
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002973 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002974}
2975
Johan Hedbergf6c63242014-08-15 21:06:59 +03002976static void hci_conn_params_free(struct hci_conn_params *params)
2977{
2978 if (params->conn) {
2979 hci_conn_drop(params->conn);
2980 hci_conn_put(params->conn);
2981 }
2982
2983 list_del(&params->action);
2984 list_del(&params->list);
2985 kfree(params);
2986}
2987
Andre Guedes15819a72014-02-03 13:56:18 -03002988/* This function requires the caller holds hdev->lock */
2989void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2990{
2991 struct hci_conn_params *params;
2992
2993 params = hci_conn_params_lookup(hdev, addr, addr_type);
2994 if (!params)
2995 return;
2996
Johan Hedbergf6c63242014-08-15 21:06:59 +03002997 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002998
Johan Hedberg95305ba2014-07-04 12:37:21 +03002999 hci_update_background_scan(hdev);
3000
Andre Guedes15819a72014-02-03 13:56:18 -03003001 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3002}
3003
3004/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a82014-07-02 17:37:26 +03003005void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003006{
3007 struct hci_conn_params *params, *tmp;
3008
3009 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a82014-07-02 17:37:26 +03003010 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3011 continue;
Jakub Pawlowskif75113a2015-08-07 20:22:53 +02003012
 3013		/* If trying to establish a one-time connection to a disabled
 3014		 * device, leave the params but mark them as explicit-connect only.
 3015		 */
3016 if (params->explicit_connect) {
3017 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3018 continue;
3019 }
3020
Andre Guedes15819a72014-02-03 13:56:18 -03003021 list_del(&params->list);
3022 kfree(params);
3023 }
3024
Johan Hedberg55af49a82014-07-02 17:37:26 +03003025 BT_DBG("All LE disabled connection parameters were removed");
3026}
3027
3028/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003029void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003030{
3031 struct hci_conn_params *params, *tmp;
3032
Johan Hedbergf6c63242014-08-15 21:06:59 +03003033 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3034 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003035
Johan Hedberga2f41a82014-07-04 12:37:19 +03003036 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003037
Andre Guedes15819a72014-02-03 13:56:18 -03003038 BT_DBG("All LE connection parameters were removed");
3039}
3040
Marcel Holtmann1904a852015-01-11 13:50:44 -08003041static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003042{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003043 if (status) {
3044 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003045
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003046 hci_dev_lock(hdev);
3047 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3048 hci_dev_unlock(hdev);
3049 return;
3050 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003051}
3052
Marcel Holtmann1904a852015-01-11 13:50:44 -08003053static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3054 u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003055{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003056 /* General inquiry access code (GIAC) */
3057 u8 lap[3] = { 0x33, 0x8b, 0x9e };
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003058 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003059 int err;
3060
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003061 if (status) {
3062 BT_ERR("Failed to disable LE scanning: status %d", status);
3063 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003064 }
3065
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003066 hdev->discovery.scan_start = 0;
3067
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003068 switch (hdev->discovery.type) {
3069 case DISCOV_TYPE_LE:
3070 hci_dev_lock(hdev);
3071 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3072 hci_dev_unlock(hdev);
3073 break;
3074
3075 case DISCOV_TYPE_INTERLEAVED:
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003076 hci_dev_lock(hdev);
3077
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003078 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3079 &hdev->quirks)) {
3080 /* If we were running LE only scan, change discovery
3081 * state. If we were running both LE and BR/EDR inquiry
3082 * simultaneously, and BR/EDR inquiry is already
3083 * finished, stop discovery, otherwise BR/EDR inquiry
Wesley Kuo177d0502015-05-13 10:33:15 +08003084			 * will stop discovery when finished. If we are resolving
 3085			 * a remote device name, do not change the discovery state.
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003086 */
Wesley Kuo177d0502015-05-13 10:33:15 +08003087 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3088 hdev->discovery.state != DISCOVERY_RESOLVING)
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003089 hci_discovery_set_state(hdev,
3090 DISCOVERY_STOPPED);
3091 } else {
Johan Hedbergbaf880a2015-03-21 08:02:23 +02003092 struct hci_request req;
3093
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003094 hci_inquiry_cache_flush(hdev);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003095
Johan Hedbergbaf880a2015-03-21 08:02:23 +02003096 hci_req_init(&req, hdev);
3097
3098 memset(&cp, 0, sizeof(cp));
3099 memcpy(&cp.lap, lap, sizeof(cp.lap));
3100 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3101 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3102
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003103 err = hci_req_run(&req, inquiry_complete);
3104 if (err) {
3105 BT_ERR("Inquiry request failed: err %d", err);
3106 hci_discovery_set_state(hdev,
3107 DISCOVERY_STOPPED);
3108 }
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003109 }
3110
3111 hci_dev_unlock(hdev);
3112 break;
3113 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003114}
3115
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003116static void le_scan_disable_work(struct work_struct *work)
3117{
3118 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003119 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003120 struct hci_request req;
3121 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003122
3123 BT_DBG("%s", hdev->name);
3124
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003125 cancel_delayed_work_sync(&hdev->le_scan_restart);
3126
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003127 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003128
Andre Guedesb1efcc22014-02-26 20:21:40 -03003129 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003130
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003131 err = hci_req_run(&req, le_scan_disable_work_complete);
3132 if (err)
3133 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003134}
3135
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003136static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3137 u16 opcode)
3138{
3139 unsigned long timeout, duration, scan_start, now;
3140
3141 BT_DBG("%s", hdev->name);
3142
3143 if (status) {
3144 BT_ERR("Failed to restart LE scan: status %d", status);
3145 return;
3146 }
3147
3148 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3149 !hdev->discovery.scan_start)
3150 return;
3151
 3152	/* When the scan was started, hdev->le_scan_disable was queued to run
 3153	 * "duration" after scan_start. That job was canceled during the scan
 3154	 * restart, so queue it again with the remaining timeout to make sure
 3155	 * the scan does not run indefinitely.
3156 */
3157 duration = hdev->discovery.scan_duration;
3158 scan_start = hdev->discovery.scan_start;
3159 now = jiffies;
3160 if (now - scan_start <= duration) {
3161 int elapsed;
3162
3163 if (now >= scan_start)
3164 elapsed = now - scan_start;
3165 else
3166 elapsed = ULONG_MAX - scan_start + now;
3167
3168 timeout = duration - elapsed;
3169 } else {
3170 timeout = 0;
3171 }
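	/* For example, if 4 s of a 10 s scan window have already elapsed,
	 * the disable work is re-armed to fire in the remaining 6 s.
	 */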
3172 queue_delayed_work(hdev->workqueue,
3173 &hdev->le_scan_disable, timeout);
3174}
3175
3176static void le_scan_restart_work(struct work_struct *work)
3177{
3178 struct hci_dev *hdev = container_of(work, struct hci_dev,
3179 le_scan_restart.work);
3180 struct hci_request req;
3181 struct hci_cp_le_set_scan_enable cp;
3182 int err;
3183
3184 BT_DBG("%s", hdev->name);
3185
 3186	/* If the controller is not scanning, we are done. */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003187 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003188 return;
3189
3190 hci_req_init(&req, hdev);
3191
3192 hci_req_add_le_scan_disable(&req);
3193
3194 memset(&cp, 0, sizeof(cp));
3195 cp.enable = LE_SCAN_ENABLE;
3196 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3197 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3198
3199 err = hci_req_run(&req, le_scan_restart_work_complete);
3200 if (err)
3201 BT_ERR("Restart LE scan request failed: err %d", err);
3202}
3203
Johan Hedberga1f4c312014-02-27 14:05:41 +02003204/* Copy the Identity Address of the controller.
3205 *
3206 * If the controller has a public BD_ADDR, then by default use that one.
3207 * If this is a LE only controller without a public address, default to
3208 * the static random address.
3209 *
3210 * For debugging purposes it is possible to force controllers with a
3211 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003212 *
3213 * In case BR/EDR has been disabled on a dual-mode controller and
3214 * userspace has configured a static address, then that address
3215 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02003216 */
3217void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3218 u8 *bdaddr_type)
3219{
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07003220 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003221 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003222 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003223 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02003224 bacpy(bdaddr, &hdev->static_addr);
3225 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3226 } else {
3227 bacpy(bdaddr, &hdev->bdaddr);
3228 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3229 }
3230}
3231
David Herrmann9be0dab2012-04-22 14:39:57 +02003232/* Alloc HCI device */
3233struct hci_dev *hci_alloc_dev(void)
3234{
3235 struct hci_dev *hdev;
3236
Johan Hedberg27f70f32014-07-21 10:50:06 +03003237 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003238 if (!hdev)
3239 return NULL;
3240
David Herrmannb1b813d2012-04-22 14:39:58 +02003241 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3242 hdev->esco_type = (ESCO_HV1);
3243 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003244 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3245 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003246 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003247 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3248 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
Florian Grandeld2609b32015-06-18 03:16:34 +02003249 hdev->adv_instance_cnt = 0;
3250 hdev->cur_adv_instance = 0x00;
Florian Grandel5d900e42015-06-18 03:16:35 +02003251 hdev->adv_instance_timeout = 0;
David Herrmannb1b813d2012-04-22 14:39:58 +02003252
David Herrmannb1b813d2012-04-22 14:39:58 +02003253 hdev->sniff_max_interval = 800;
3254 hdev->sniff_min_interval = 80;
3255
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003256 hdev->le_adv_channel_map = 0x07;
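	/* The interval defaults below use Bluetooth spec units: advertising
	 * and scan values are in 0.625 ms units (0x0800 = 1.28 s, 0x0060 =
	 * 60 ms, 0x0030 = 30 ms), connection intervals are in 1.25 ms units
	 * and the supervision timeout is in 10 ms units.
	 */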
Georg Lukas628531c2014-07-26 13:59:57 +02003257 hdev->le_adv_min_interval = 0x0800;
3258 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003259 hdev->le_scan_interval = 0x0060;
3260 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003261 hdev->le_conn_min_interval = 0x0028;
3262 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003263 hdev->le_conn_latency = 0x0000;
3264 hdev->le_supv_timeout = 0x002a;
Marcel Holtmanna8e1bfa2014-12-20 16:28:40 +01003265 hdev->le_def_tx_len = 0x001b;
3266 hdev->le_def_tx_time = 0x0148;
3267 hdev->le_max_tx_len = 0x001b;
3268 hdev->le_max_tx_time = 0x0148;
3269 hdev->le_max_rx_len = 0x001b;
3270 hdev->le_max_rx_time = 0x0148;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003271
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003272 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003273 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003274 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3275 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003276
David Herrmannb1b813d2012-04-22 14:39:58 +02003277 mutex_init(&hdev->lock);
3278 mutex_init(&hdev->req_lock);
3279
3280 INIT_LIST_HEAD(&hdev->mgmt_pending);
3281 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003282 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003283 INIT_LIST_HEAD(&hdev->uuids);
3284 INIT_LIST_HEAD(&hdev->link_keys);
3285 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003286 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003287 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003288 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003289 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003290 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003291 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003292 INIT_LIST_HEAD(&hdev->conn_hash.list);
Florian Grandeld2609b32015-06-18 03:16:34 +02003293 INIT_LIST_HEAD(&hdev->adv_instances);
David Herrmannb1b813d2012-04-22 14:39:58 +02003294
3295 INIT_WORK(&hdev->rx_work, hci_rx_work);
3296 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3297 INIT_WORK(&hdev->tx_work, hci_tx_work);
3298 INIT_WORK(&hdev->power_on, hci_power_on);
Marcel Holtmannc7741d12015-01-28 11:09:55 -08003299 INIT_WORK(&hdev->error_reset, hci_error_reset);
David Herrmannb1b813d2012-04-22 14:39:58 +02003300
David Herrmannb1b813d2012-04-22 14:39:58 +02003301 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3302 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3303 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003304 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Florian Grandel5d900e42015-06-18 03:16:35 +02003305 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
David Herrmannb1b813d2012-04-22 14:39:58 +02003306
David Herrmannb1b813d2012-04-22 14:39:58 +02003307 skb_queue_head_init(&hdev->rx_q);
3308 skb_queue_head_init(&hdev->cmd_q);
3309 skb_queue_head_init(&hdev->raw_q);
3310
3311 init_waitqueue_head(&hdev->req_wait_q);
3312
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003313 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003314
David Herrmannb1b813d2012-04-22 14:39:58 +02003315 hci_init_sysfs(hdev);
3316 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003317
3318 return hdev;
3319}
3320EXPORT_SYMBOL(hci_alloc_dev);
3321
3322/* Free HCI device */
3323void hci_free_dev(struct hci_dev *hdev)
3324{
David Herrmann9be0dab2012-04-22 14:39:57 +02003325 /* will free via device release */
3326 put_device(&hdev->dev);
3327}
3328EXPORT_SYMBOL(hci_free_dev);
3329
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330/* Register HCI device */
3331int hci_register_dev(struct hci_dev *hdev)
3332{
David Herrmannb1b813d2012-04-22 14:39:58 +02003333 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334
Marcel Holtmann74292d52014-07-06 15:50:27 +02003335 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336 return -EINVAL;
3337
Mat Martineau08add512011-11-02 16:18:36 -07003338 /* Do not allow HCI_AMP devices to register at index 0,
3339 * so the index can be used as the AMP controller ID.
3340 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003341 switch (hdev->dev_type) {
3342 case HCI_BREDR:
3343 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3344 break;
3345 case HCI_AMP:
3346 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3347 break;
3348 default:
3349 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003351
Sasha Levin3df92b32012-05-27 22:36:56 +02003352 if (id < 0)
3353 return id;
3354
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355 sprintf(hdev->name, "hci%d", id);
3356 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003357
3358 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3359
Kees Cookd8537542013-07-03 15:04:57 -07003360 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3361 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003362 if (!hdev->workqueue) {
3363 error = -ENOMEM;
3364 goto err;
3365 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003366
Kees Cookd8537542013-07-03 15:04:57 -07003367 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3368 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003369 if (!hdev->req_workqueue) {
3370 destroy_workqueue(hdev->workqueue);
3371 error = -ENOMEM;
3372 goto err;
3373 }
3374
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003375 if (!IS_ERR_OR_NULL(bt_debugfs))
3376 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3377
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003378 dev_set_name(&hdev->dev, "%s", hdev->name);
3379
3380 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003381 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003382 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003384 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003385 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3386 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003387 if (hdev->rfkill) {
3388 if (rfkill_register(hdev->rfkill) < 0) {
3389 rfkill_destroy(hdev->rfkill);
3390 hdev->rfkill = NULL;
3391 }
3392 }
3393
Johan Hedberg5e130362013-09-13 08:58:17 +03003394 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003395 hci_dev_set_flag(hdev, HCI_RFKILLED);
Johan Hedberg5e130362013-09-13 08:58:17 +03003396
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003397 hci_dev_set_flag(hdev, HCI_SETUP);
3398 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003399
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003400 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003401 /* Assume BR/EDR support until proven otherwise (such as
3402 * through reading supported features during init.
3403 */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003404 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg56f87902013-10-02 13:43:13 +03003405 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003406
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003407 write_lock(&hci_dev_list_lock);
3408 list_add(&hdev->list, &hci_dev_list);
3409 write_unlock(&hci_dev_list_lock);
3410
Marcel Holtmann4a964402014-07-02 19:10:33 +02003411 /* Devices that are marked for raw-only usage are unconfigured
3412 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003413 */
3414 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003415 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003416
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003418 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419
Johan Hedberg19202572013-01-14 22:33:51 +02003420 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003421
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003423
David Herrmann33ca9542011-10-08 14:58:49 +02003424err_wqueue:
3425 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003426 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003427err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003428 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003429
David Herrmann33ca9542011-10-08 14:58:49 +02003430 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431}
3432EXPORT_SYMBOL(hci_register_dev);
3433
3434/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003435void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436{
Marcel Holtmann2d7cc192015-04-04 21:59:27 -07003437 int id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003438
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003439 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003441 hci_dev_set_flag(hdev, HCI_UNREGISTER);
Johan Hovold94324962012-03-15 14:48:41 +01003442
Sasha Levin3df92b32012-05-27 22:36:56 +02003443 id = hdev->id;
3444
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003445 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003447 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448
3449 hci_dev_do_close(hdev);
3450
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003451 cancel_work_sync(&hdev->power_on);
3452
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003453 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003454 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3455 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003456 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003457 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003458 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003459 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003460
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003461 /* mgmt_index_removed should take care of emptying the
3462 * pending list */
3463 BUG_ON(!list_empty(&hdev->mgmt_pending));
3464
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 hci_notify(hdev, HCI_DEV_UNREG);
3466
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003467 if (hdev->rfkill) {
3468 rfkill_unregister(hdev->rfkill);
3469 rfkill_destroy(hdev->rfkill);
3470 }
3471
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003472 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003473
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003474 debugfs_remove_recursive(hdev->debugfs);
3475
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003476 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003477 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003478
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003479 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003480 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003481 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003482 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003483 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003484 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003485 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003486 hci_remote_oob_data_clear(hdev);
Florian Grandeld2609b32015-06-18 03:16:34 +02003487 hci_adv_instances_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003488 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003489 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003490 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003491 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003492
David Herrmanndc946bd2012-01-07 15:47:24 +01003493 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003494
3495 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496}
3497EXPORT_SYMBOL(hci_unregister_dev);
3498
3499/* Suspend HCI device */
3500int hci_suspend_dev(struct hci_dev *hdev)
3501{
3502 hci_notify(hdev, HCI_DEV_SUSPEND);
3503 return 0;
3504}
3505EXPORT_SYMBOL(hci_suspend_dev);
3506
3507/* Resume HCI device */
3508int hci_resume_dev(struct hci_dev *hdev)
3509{
3510 hci_notify(hdev, HCI_DEV_RESUME);
3511 return 0;
3512}
3513EXPORT_SYMBOL(hci_resume_dev);
3514
Marcel Holtmann75e05692014-11-02 08:15:38 +01003515/* Reset HCI device */
3516int hci_reset_dev(struct hci_dev *hdev)
3517{
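	/* Synthesized HCI event: event code, parameter length (1), and a
	 * hardware error code of 0x00.
	 */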
3518 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3519 struct sk_buff *skb;
3520
3521 skb = bt_skb_alloc(3, GFP_ATOMIC);
3522 if (!skb)
3523 return -ENOMEM;
3524
3525 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3526 memcpy(skb_put(skb, 3), hw_err, 3);
3527
3528 /* Send Hardware Error to upper stack */
3529 return hci_recv_frame(hdev, skb);
3530}
3531EXPORT_SYMBOL(hci_reset_dev);
3532
Marcel Holtmann76bca882009-11-18 00:40:39 +01003533/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003534int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003535{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003536	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003537		      !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003538 kfree_skb(skb);
3539 return -ENXIO;
3540 }
3541
Marcel Holtmannfe806dc2015-10-08 03:14:28 +02003542 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3543 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3544 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3545 kfree_skb(skb);
3546 return -EINVAL;
3547 }
3548
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003549 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003550 bt_cb(skb)->incoming = 1;
3551
3552 /* Time stamp */
3553 __net_timestamp(skb);
3554
Marcel Holtmann76bca882009-11-18 00:40:39 +01003555 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003556 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003557
Marcel Holtmann76bca882009-11-18 00:40:39 +01003558 return 0;
3559}
3560EXPORT_SYMBOL(hci_recv_frame);
3561
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003562/* Receive diagnostic message from HCI drivers */
3563int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3564{
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003565 /* Mark as diagnostic packet */
3566 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3567
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003568 /* Time stamp */
3569 __net_timestamp(skb);
3570
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003571 skb_queue_tail(&hdev->rx_q, skb);
3572 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003573
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003574 return 0;
3575}
3576EXPORT_SYMBOL(hci_recv_diag);
3577
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578/* ---- Interface to upper protocols ---- */
3579
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580int hci_register_cb(struct hci_cb *cb)
3581{
3582 BT_DBG("%p name %s", cb, cb->name);
3583
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003584 mutex_lock(&hci_cb_list_lock);
Johan Hedberg00629e02015-02-18 14:53:54 +02003585 list_add_tail(&cb->list, &hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003586 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587
3588 return 0;
3589}
3590EXPORT_SYMBOL(hci_register_cb);
3591
3592int hci_unregister_cb(struct hci_cb *cb)
3593{
3594 BT_DBG("%p name %s", cb, cb->name);
3595
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003596 mutex_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597 list_del(&cb->list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003598 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599
3600 return 0;
3601}
3602EXPORT_SYMBOL(hci_unregister_cb);
3603
Marcel Holtmann51086992013-10-10 14:54:19 -07003604static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003606 int err;
3607
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003608 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003610 /* Time stamp */
3611 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003613 /* Send copy to monitor */
3614 hci_send_to_monitor(hdev, skb);
3615
3616 if (atomic_read(&hdev->promisc)) {
3617 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003618 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619 }
3620
3621 /* Get rid of skb owner, prior to sending to the driver. */
3622 skb_orphan(skb);
3623
Marcel Holtmann73d0d3c2015-10-04 23:34:01 +02003624 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3625 kfree_skb(skb);
3626 return;
3627 }
3628
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003629 err = hdev->send(hdev, skb);
3630 if (err < 0) {
3631 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3632 kfree_skb(skb);
3633 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634}
3635
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003636/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003637int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3638 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003639{
3640 struct sk_buff *skb;
3641
3642 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3643
3644 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3645 if (!skb) {
3646 BT_ERR("%s no memory for command", hdev->name);
3647 return -ENOMEM;
3648 }
3649
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003650 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003651 * single-command requests.
3652 */
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01003653 bt_cb(skb)->hci.req_start = true;
Johan Hedberg11714b32013-03-05 20:37:47 +02003654
Linus Torvalds1da177e2005-04-16 15:20:36 -07003655 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003656 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657
3658 return 0;
3659}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660
3661/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003662void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663{
3664 struct hci_command_hdr *hdr;
3665
3666 if (!hdev->sent_cmd)
3667 return NULL;
3668
3669 hdr = (void *) hdev->sent_cmd->data;
3670
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003671 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672 return NULL;
3673
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003674 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675
3676 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3677}
3678
Loic Poulainfbef1682015-09-29 15:05:44 +02003679/* Send HCI command and wait for the Command Complete event */
3680struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3681 const void *param, u32 timeout)
3682{
3683 struct sk_buff *skb;
3684
3685 if (!test_bit(HCI_UP, &hdev->flags))
3686 return ERR_PTR(-ENETDOWN);
3687
3688 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3689
3690 hci_req_lock(hdev);
3691 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3692 hci_req_unlock(hdev);
3693
3694 return skb;
3695}
3696EXPORT_SYMBOL(hci_cmd_sync);
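/* Illustrative sketch, not code from this file: a driver can issue a
 * command synchronously and consume the returned event skb; the opcode
 * and timeout here are only an assumption for the example:
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...parse skb->data...
 *	kfree_skb(skb);
 */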
3697
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698/* Send ACL data */
3699static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3700{
3701 struct hci_acl_hdr *hdr;
3702 int len = skb->len;
3703
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003704 skb_push(skb, HCI_ACL_HDR_SIZE);
3705 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003706 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
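	/* hci_handle_pack() folds the packet boundary/broadcast flag bits
	 * into the top nibble above the 12-bit connection handle.
	 */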
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003707 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3708 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003709}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use
		 * spin_lock_bh here because on 6LoWPAN links this
		 * function is called from softirq context, and taking
		 * a normal spin lock could cause deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
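
/* Fragmentation sketch (assumed layout): an L2CAP PDU split across a
 * frag_list is queued as one ACL_START fragment followed by ACL_CONT
 * fragments, all under queue->lock so the controller never sees an
 * interleaved fragment stream:
 *
 *	head (ACL_START, conn->handle) -> frag1 (ACL_CONT) -> frag2 ...
 *
 * Every fragment gets its own ACL header via hci_add_acl_hdr().
 */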

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
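
/* Worked example (assumed numbers): with hdev->acl_cnt == 8 and three
 * eligible ACL connections, hci_low_sent() picks the connection with the
 * fewest in-flight packets (lowest c->sent) and grants it a quota of
 * 8 / 3 == 2 packets; a quota of 0 is rounded up to 1 so a connection
 * with queued data always makes progress.
 */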

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
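
/* Selection sketch (assumed state): given two channels whose head skbs
 * carry priority 5 and priority 7, hci_chan_sent() resets its counters
 * the first time it sees priority 7, so only channels at the highest
 * observed priority compete; among those, the channel on the connection
 * with the fewest outstanding packets (lowest conn->sent) wins the quota.
 */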

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
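
/* Worked arithmetic (assumed sizes): with hdev->block_len == 64 and an
 * skb of 1021 bytes including the 4-byte ACL header,
 *
 *	__get_blocks() == DIV_ROUND_UP(1021 - 4, 64) == 16
 *
 * so the scheduler charges 16 data blocks against hdev->block_cnt.
 */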

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
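
/* Dispatch sketch: packet-based controllers account for whole ACL packets
 * (hdev->acl_cnt) while block-based, AMP-style controllers account for
 * fixed-size data blocks (hdev->block_cnt), so the same queued traffic is
 * drained by hci_sched_acl_pkt() or hci_sched_acl_blk() depending on the
 * flow control mode the controller reported.
 */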
4152
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004154static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155{
4156 struct hci_conn *conn;
4157 struct sk_buff *skb;
4158 int quote;
4159
4160 BT_DBG("%s", hdev->name);
4161
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004162 if (!hci_conn_num(hdev, SCO_LINK))
4163 return;
4164
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4166 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4167 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004168 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169
4170 conn->sent++;
4171 if (conn->sent == ~0)
4172 conn->sent = 0;
4173 }
4174 }
4175}
4176
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004177static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004178{
4179 struct hci_conn *conn;
4180 struct sk_buff *skb;
4181 int quote;
4182
4183 BT_DBG("%s", hdev->name);
4184
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004185 if (!hci_conn_num(hdev, ESCO_LINK))
4186 return;
4187
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004188 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4189 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004190 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4191 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004192 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004193
4194 conn->sent++;
4195 if (conn->sent == ~0)
4196 conn->sent = 0;
4197 }
4198 }
4199}
4200
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004201static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004202{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004203 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004204 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004205 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004206
4207 BT_DBG("%s", hdev->name);
4208
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004209 if (!hci_conn_num(hdev, LE_LINK))
4210 return;
4211
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004212 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004213 /* LE tx timeout must be longer than maximum
4214 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004215 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004216 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004217 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004218 }
4219
4220 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004221 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004222 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004223 u32 priority = (skb_peek(&chan->data_q))->priority;
4224 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004225 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004226 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004227
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004228 /* Stop if priority has changed */
4229 if (skb->priority < priority)
4230 break;
4231
4232 skb = skb_dequeue(&chan->data_q);
4233
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004234 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004235 hdev->le_last_tx = jiffies;
4236
4237 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004238 chan->sent++;
4239 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004240 }
4241 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004242
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004243 if (hdev->le_pkts)
4244 hdev->le_cnt = cnt;
4245 else
4246 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004247
4248 if (cnt != tmp)
4249 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004250}
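
/* Buffer sharing note (sketch): controllers without a dedicated LE buffer
 * pool report le_pkts == 0, so hci_sched_le() borrows credits from the
 * ACL pool; the final assignment writes the remaining credits back to
 * either hdev->le_cnt or hdev->acl_cnt accordingly.
 */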

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
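
/* Unpacking illustration (assumed value): for a received hdr->handle of
 * 0x202a,
 *
 *	hci_flags(0x202a)  == 0x02	(ACL_START)
 *	hci_handle(0x202a) == 0x002a	(connection handle)
 *
 * mirroring the hci_handle_pack() encoding used on the TX path.
 */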

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->hci.req_start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->hci.req_complete;
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
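
/* Request walk sketch (assumed queue state): for a request built of three
 * commands A -> B -> C, only A carries hci.req_start. When the last
 * command completes, its callback is found directly on hdev->sent_cmd;
 * when an earlier command fails, the loop above drains the remaining
 * commands of the same request from cmd_q, picking up the request's
 * complete callback along the way, and stops at the req_start marker of
 * the next request so unrelated commands stay queued.
 */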

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
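
/* Flow control sketch: hdev->cmd_cnt mirrors the controller's free command
 * slots (typically 1 per the Num_HCI_Command_Packets event parameter), so
 * hci_cmd_work() sends at most that many queued commands and arms
 * cmd_timer with HCI_CMD_TIMEOUT; the counter is only replenished when the
 * Command Complete or Command Status event for the sent command arrives,
 * which keeps the HCI command channel strictly ordered.
 */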