blob: d2b3dd32d6cf1c6469b9fc728c62a625ac1c9b67 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
43
/* Work handlers for the per-device RX, command-TX and data-TX
 * workqueues; defined later in this file.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
58
/* ----- HCI requests ----- */

/* States of hdev->req_status while a synchronous request is in flight */
#define HCI_REQ_DONE	  0	/* completed; result in hdev->req_result */
#define HCI_REQ_PEND	  1	/* submitted; a waiter is sleeping on it */
#define HCI_REQ_CANCELED  2	/* aborted; req_result holds the errno */

/* Serialize synchronous requests against each other */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Forward a device event (register/unregister, up/down, ...) to the
 * HCI socket layer so interested sockets are informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070075/* ---- HCI debugfs entries ---- */
76
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070077static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -070083 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070084 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87}
88
/* Write handler for the "dut_mode" debugfs entry.
 *
 * Accepts a boolean string from userspace and toggles Device Under
 * Test mode on the controller.  Enabling sends HCI_OP_ENABLE_DUT_MODE;
 * disabling is done via a full HCI_OP_RESET since there is no
 * dedicated "disable DUT mode" command.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	/* DUT mode can only be toggled while the controller is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do if the requested state is already active */
	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	/* Serialize with other synchronous HCI requests */
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Only the command status matters; drop the response payload */
	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
129
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
136
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200137static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
138 size_t count, loff_t *ppos)
139{
140 struct hci_dev *hdev = file->private_data;
141 char buf[3];
142
143 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
144 buf[1] = '\n';
145 buf[2] = '\0';
146 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
147}
148
/* Write handler for the "vendor_diag" debugfs entry.
 *
 * Parses a boolean from userspace and asks the driver (via its
 * set_diag callback) to enable or disable vendor-specific diagnostic
 * messages.  The HCI_VENDOR_DIAG flag is updated only if the driver
 * call succeeds.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* NOTE(review): unlike dut_mode_write() there is no HCI_UP check
	 * here; presumably the driver's set_diag callback copes with a
	 * powered-down controller — confirm with driver implementations.
	 */
	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
179
/* File operations for the "vendor_diag" debugfs entry */
static const struct file_operations vendor_diag_fops = {
	.open = simple_open,
	.read = vendor_diag_read,
	.write = vendor_diag_write,
	.llseek = default_llseek,
};
186
/* Create the debugfs entries valid for every controller: "dut_mode"
 * always, and "vendor_diag" only when the driver provides a set_diag
 * callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
196
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197/* ---- HCI requests ---- */
198
/* Completion callback for synchronous HCI requests.
 *
 * Records the result, optionally takes an extra reference on the
 * response skb for the waiter to consume (hdev->req_skb), and wakes
 * the task sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb); /* ref for the waiter */
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
212
/* Abort a pending synchronous request with the given (positive) error.
 * The sleeping waiter is woken and will translate HCI_REQ_CANCELED
 * into a -err return.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
223
/* Send a single HCI command and sleep (up to @timeout jiffies) until it
 * completes.  If @event is non-zero, completion is keyed on that
 * specific event instead of the default Command Complete/Status.
 *
 * Returns the response skb on success (caller must kfree_skb() it),
 * ERR_PTR(-ENODATA) if the request completed without a response skb,
 * or another ERR_PTR() on failure.  Callers serialize via
 * hci_req_lock() (see dut_mode_write()).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue on the wait queue before running the request so the
	 * completion wake-up cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a signal we return without resetting
	 * hdev->req_status or reaping hdev->req_skb; presumably a later
	 * request resets the state — confirm this cannot leak an skb.
	 */
	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	/* Consume the request state; ownership of req_skb moves to us */
	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
288
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * default Command Complete/Status event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
295
/* Execute request and wait for completion.
 *
 * @func builds the request (a sequence of hci_req_add() calls) which is
 * then run and waited on for up to @timeout jiffies.  Returns 0 on
 * success or a negative errno.  Callers must hold hdev->req_lock (see
 * hci_req_sync()).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	/* Queue on the wait queue before running the request so the
	 * completion wake-up cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
362
/* Public entry point for synchronous requests: fails with -ENETDOWN if
 * the device is not up, and takes req_lock so requests are serialized.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
380
/* Request builder: queue an HCI_OP_RESET and flag the device as being
 * reset so other code paths know a reset is in progress.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
389
/* Stage-1 init for BR/EDR controllers: select packet-based flow control
 * and read the basic controller information.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
403
/* Stage-1 init for AMP controllers: select block-based flow control and
 * read the AMP-specific controller information.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
426
/* Stage-2 init for AMP controllers. */
static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}
436
/* Stage-1 init request: optional controller reset followed by the
 * transport-type specific basic setup.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
461
/* BR/EDR part of stage-2 init: read controller parameters, clear the
 * event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
493
/* LE part of stage-2 init: read LE controller parameters, clear the
 * white list and, for LE-only controllers, mark LE implicitly enabled.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
517
/* Build and queue the first page of the HCI event mask, starting from a
 * BR/EDR or LE-only baseline and enabling additional events based on
 * the supported LMP features.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
594
/* Stage-2 init request: AMP controllers get their own short path;
 * BR/EDR and LE controllers get the common configuration commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stored EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
676
Johan Hedberg42c6b122013-03-05 20:37:49 +0200677static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200678{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200679 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200680 struct hci_cp_write_def_link_policy cp;
681 u16 link_policy = 0;
682
683 if (lmp_rswitch_capable(hdev))
684 link_policy |= HCI_LP_RSWITCH;
685 if (lmp_hold_capable(hdev))
686 link_policy |= HCI_LP_HOLD;
687 if (lmp_sniff_capable(hdev))
688 link_policy |= HCI_LP_SNIFF;
689 if (lmp_park_capable(hdev))
690 link_policy |= HCI_LP_PARK;
691
692 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200693 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200694}
695
/* Sync the HCI_LE_ENABLED host flag into the controller via the Write
 * LE Host Supported command (dual-mode controllers only).
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command if it would change the current setting */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
716
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300717static void hci_set_event_mask_page_2(struct hci_request *req)
718{
719 struct hci_dev *hdev = req->hdev;
720 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
721
722 /* If Connectionless Slave Broadcast master role is supported
723 * enable all necessary events for it.
724 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800725 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300726 events[1] |= 0x40; /* Triggered Clock Capture */
727 events[1] |= 0x80; /* Synchronization Train Complete */
728 events[2] |= 0x10; /* Slave Page Response Timeout */
729 events[2] |= 0x20; /* CSB Channel Map Change */
730 }
731
732 /* If Connectionless Slave Broadcast slave role is supported
733 * enable all necessary events for it.
734 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800735 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300736 events[2] |= 0x01; /* Synchronization Train Received */
737 events[2] |= 0x02; /* CSB Receive */
738 events[2] |= 0x04; /* CSB Timeout */
739 events[2] |= 0x08; /* Truncated Page Complete */
740 }
741
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800742 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +0200743 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800744 events[2] |= 0x80;
745
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300746 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
747}
748
/* Third stage of controller initialization: commands gated on the
 * supported-commands bitmask and LMP/LE feature bits read during the
 * earlier stages. The order of hci_req_add() calls is the order the
 * commands are sent to the controller, so it must be preserved.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Read Stored Link Key (commands[6] bit 5), unless a quirk marks
	 * the controller's stored link key support as broken.
	 */
	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	/* Write Default Link Policy Settings (commands[5] bit 4) */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	/* Read Page Scan Activity (commands[8] bit 0) */
	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		/* Default LE events: Connection Complete, Advertising
		 * Report, Connection Update Complete, Read Remote Used
		 * Features Complete.
		 */
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
851
/* Fourth stage of controller initialization: remaining commands that
 * depend on information gathered during the previous stages. Command
 * emission order must be preserved.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
904
/* Run the full initialization sequence for a configured controller:
 * stages 1 and 2 for every controller type, stages 3 and 4 only for
 * BR/EDR/LE controllers, then create debugfs entries when in the
 * setup or config phase. Returns 0 on success or a negative error
 * from the failing request stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Basic debugfs entries are only created during initial setup */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
961
/* Minimal init request used for unconfigured controllers: optional
 * reset plus reading version and (when the driver can set it) the
 * BD address. Command order must be preserved.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
979
/* Initialization path for controllers that are still unconfigured.
 * Raw devices are left completely untouched. Returns 0 on success or
 * a negative error from the init0 request.
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Basic debugfs entries are only created during initial setup */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
996
Johan Hedberg42c6b122013-03-05 20:37:49 +0200997static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700998{
999 __u8 scan = opt;
1000
Johan Hedberg42c6b122013-03-05 20:37:49 +02001001 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001002
1003 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001004 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005}
1006
Johan Hedberg42c6b122013-03-05 20:37:49 +02001007static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008{
1009 __u8 auth = opt;
1010
Johan Hedberg42c6b122013-03-05 20:37:49 +02001011 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012
1013 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001014 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015}
1016
Johan Hedberg42c6b122013-03-05 20:37:49 +02001017static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018{
1019 __u8 encrypt = opt;
1020
Johan Hedberg42c6b122013-03-05 20:37:49 +02001021 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001023 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001024 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001025}
1026
Johan Hedberg42c6b122013-03-05 20:37:49 +02001027static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001028{
1029 __le16 policy = cpu_to_le16(opt);
1030
Johan Hedberg42c6b122013-03-05 20:37:49 +02001031 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001032
1033 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001034 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001035}
1036
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001037/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038 * Device is held on return. */
1039struct hci_dev *hci_dev_get(int index)
1040{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001041 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042
1043 BT_DBG("%d", index);
1044
1045 if (index < 0)
1046 return NULL;
1047
1048 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001049 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050 if (d->id == index) {
1051 hdev = hci_dev_hold(d);
1052 break;
1053 }
1054 }
1055 read_unlock(&hci_dev_list_lock);
1056 return hdev;
1057}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001058
1059/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001060
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001061bool hci_discovery_active(struct hci_dev *hdev)
1062{
1063 struct discovery_state *discov = &hdev->discovery;
1064
Andre Guedes6fbe1952012-02-03 17:47:58 -03001065 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001066 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001067 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001068 return true;
1069
Andre Guedes6fbe1952012-02-03 17:47:58 -03001070 default:
1071 return false;
1072 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001073}
1074
Johan Hedbergff9ef572012-01-04 14:23:45 +02001075void hci_discovery_set_state(struct hci_dev *hdev, int state)
1076{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001077 int old_state = hdev->discovery.state;
1078
Johan Hedbergff9ef572012-01-04 14:23:45 +02001079 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1080
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001081 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001082 return;
1083
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001084 hdev->discovery.state = state;
1085
Johan Hedbergff9ef572012-01-04 14:23:45 +02001086 switch (state) {
1087 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001088 hci_update_background_scan(hdev);
1089
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001090 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001091 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001092 break;
1093 case DISCOVERY_STARTING:
1094 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001095 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001096 mgmt_discovering(hdev, 1);
1097 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001098 case DISCOVERY_RESOLVING:
1099 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001100 case DISCOVERY_STOPPING:
1101 break;
1102 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001103}
1104
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001105void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106{
Johan Hedberg30883512012-01-04 14:16:21 +02001107 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001108 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109
Johan Hedberg561aafb2012-01-04 13:31:59 +02001110 list_for_each_entry_safe(p, n, &cache->all, all) {
1111 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001112 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001114
1115 INIT_LIST_HEAD(&cache->unknown);
1116 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001117}
1118
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001119struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1120 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121{
Johan Hedberg30883512012-01-04 14:16:21 +02001122 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123 struct inquiry_entry *e;
1124
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001125 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126
Johan Hedberg561aafb2012-01-04 13:31:59 +02001127 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001129 return e;
1130 }
1131
1132 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133}
1134
Johan Hedberg561aafb2012-01-04 13:31:59 +02001135struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001136 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001137{
Johan Hedberg30883512012-01-04 14:16:21 +02001138 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001139 struct inquiry_entry *e;
1140
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001141 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001142
1143 list_for_each_entry(e, &cache->unknown, list) {
1144 if (!bacmp(&e->data.bdaddr, bdaddr))
1145 return e;
1146 }
1147
1148 return NULL;
1149}
1150
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001151struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001152 bdaddr_t *bdaddr,
1153 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001154{
1155 struct discovery_state *cache = &hdev->discovery;
1156 struct inquiry_entry *e;
1157
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001158 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001159
1160 list_for_each_entry(e, &cache->resolve, list) {
1161 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1162 return e;
1163 if (!bacmp(&e->data.bdaddr, bdaddr))
1164 return e;
1165 }
1166
1167 return NULL;
1168}
1169
/* Reposition @ie on the resolve list after its RSSI changed.
 *
 * The list is kept ordered so that entries with stronger signal
 * (smaller absolute RSSI) come first; entries currently in
 * NAME_PENDING state are never displaced.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Stop at the first non-pending entry with equal or weaker
	 * signal; @ie is inserted right before it.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1188
/* Add or refresh the inquiry cache entry for @data.
 *
 * Returns MGMT_DEV_FOUND_* flags for reporting the result:
 * LEGACY_PAIRING when no SSP support was observed (in the new data or
 * a cached entry), CONFIRM_NAME when the remote name still needs to
 * be confirmed by userspace.
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* Keep the resolve list ordered when the RSSI of an
		 * entry that still needs name resolution changes.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Allocation failed: still report the result and ask
		 * userspace to confirm the name.
		 */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* A newly learned name promotes the entry off the unknown list */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1250
1251static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1252{
Johan Hedberg30883512012-01-04 14:16:21 +02001253 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 struct inquiry_info *info = (struct inquiry_info *) buf;
1255 struct inquiry_entry *e;
1256 int copied = 0;
1257
Johan Hedberg561aafb2012-01-04 13:31:59 +02001258 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001260
1261 if (copied >= num)
1262 break;
1263
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 bacpy(&info->bdaddr, &data->bdaddr);
1265 info->pscan_rep_mode = data->pscan_rep_mode;
1266 info->pscan_period_mode = data->pscan_period_mode;
1267 info->pscan_mode = data->pscan_mode;
1268 memcpy(info->dev_class, data->dev_class, 3);
1269 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001270
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001272 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273 }
1274
1275 BT_DBG("cache %p, copied %d", cache, copied);
1276 return copied;
1277}
1278
Johan Hedberg42c6b122013-03-05 20:37:49 +02001279static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280{
1281 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001282 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 struct hci_cp_inquiry cp;
1284
1285 BT_DBG("%s", hdev->name);
1286
1287 if (test_bit(HCI_INQUIRY, &hdev->flags))
1288 return;
1289
1290 /* Start Inquiry */
1291 memcpy(&cp.lap, &ir->lap, 3);
1292 cp.length = ir->length;
1293 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001294 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295}
1296
1297int hci_inquiry(void __user *arg)
1298{
1299 __u8 __user *ptr = arg;
1300 struct hci_inquiry_req ir;
1301 struct hci_dev *hdev;
1302 int err = 0, do_inquiry = 0, max_rsp;
1303 long timeo;
1304 __u8 *buf;
1305
1306 if (copy_from_user(&ir, ptr, sizeof(ir)))
1307 return -EFAULT;
1308
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001309 hdev = hci_dev_get(ir.dev_id);
1310 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 return -ENODEV;
1312
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001313 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001314 err = -EBUSY;
1315 goto done;
1316 }
1317
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001318 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001319 err = -EOPNOTSUPP;
1320 goto done;
1321 }
1322
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001323 if (hdev->dev_type != HCI_BREDR) {
1324 err = -EOPNOTSUPP;
1325 goto done;
1326 }
1327
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001328 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001329 err = -EOPNOTSUPP;
1330 goto done;
1331 }
1332
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001333 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001334 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001335 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001336 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 do_inquiry = 1;
1338 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001339 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340
Marcel Holtmann04837f62006-07-03 10:02:33 +02001341 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001342
1343 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001344 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1345 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001346 if (err < 0)
1347 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001348
1349 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1350 * cleared). If it is interrupted by a signal, return -EINTR.
1351 */
NeilBrown74316202014-07-07 15:16:04 +10001352 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001353 TASK_INTERRUPTIBLE))
1354 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001355 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001357 /* for unlimited number of responses we will use buffer with
1358 * 255 entries
1359 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1361
1362 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1363 * copy it to the user space.
1364 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001365 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001366 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 err = -ENOMEM;
1368 goto done;
1369 }
1370
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001371 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001373 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374
1375 BT_DBG("num_rsp %d", ir.num_rsp);
1376
1377 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1378 ptr += sizeof(ir);
1379 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001380 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001382 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 err = -EFAULT;
1384
1385 kfree(buf);
1386
1387done:
1388 hci_dev_put(hdev);
1389 return err;
1390}
1391
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001392static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 int ret = 0;
1395
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 BT_DBG("%s %p", hdev->name, hdev);
1397
1398 hci_req_lock(hdev);
1399
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001400 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001401 ret = -ENODEV;
1402 goto done;
1403 }
1404
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001405 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1406 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001407 /* Check for rfkill but allow the HCI setup stage to
1408 * proceed (which in itself doesn't cause any RF activity).
1409 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001410 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001411 ret = -ERFKILL;
1412 goto done;
1413 }
1414
1415 /* Check for valid public address or a configured static
1416 * random adddress, but let the HCI setup proceed to
1417 * be able to determine if there is a public address
1418 * or not.
1419 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001420 * In case of user channel usage, it is not important
1421 * if a public address or static random address is
1422 * available.
1423 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001424 * This check is only valid for BR/EDR controllers
1425 * since AMP controllers do not have an address.
1426 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001427 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001428 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001429 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1430 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1431 ret = -EADDRNOTAVAIL;
1432 goto done;
1433 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001434 }
1435
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 if (test_bit(HCI_UP, &hdev->flags)) {
1437 ret = -EALREADY;
1438 goto done;
1439 }
1440
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 if (hdev->open(hdev)) {
1442 ret = -EIO;
1443 goto done;
1444 }
1445
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001446 set_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001447 hci_notify(hdev, HCI_DEV_OPEN);
1448
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001449 atomic_set(&hdev->cmd_cnt, 1);
1450 set_bit(HCI_INIT, &hdev->flags);
1451
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001452 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001453 if (hdev->setup)
1454 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001455
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001456 /* The transport driver can set these quirks before
1457 * creating the HCI device or in its setup callback.
1458 *
1459 * In case any of them is set, the controller has to
1460 * start up as unconfigured.
1461 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001462 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1463 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001464 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001465
1466 /* For an unconfigured controller it is required to
1467 * read at least the version information provided by
1468 * the Read Local Version Information command.
1469 *
1470 * If the set_bdaddr driver callback is provided, then
1471 * also the original Bluetooth public device address
1472 * will be read using the Read BD Address command.
1473 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001474 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001475 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001476 }
1477
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001478 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001479 /* If public address change is configured, ensure that
1480 * the address gets programmed. If the driver does not
1481 * support changing the public address, fail the power
1482 * on procedure.
1483 */
1484 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1485 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001486 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1487 else
1488 ret = -EADDRNOTAVAIL;
1489 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001490
1491 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001492 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1493 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001494 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 }
1496
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001497 clear_bit(HCI_INIT, &hdev->flags);
1498
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 if (!ret) {
1500 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001501 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 set_bit(HCI_UP, &hdev->flags);
1503 hci_notify(hdev, HCI_DEV_UP);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001504 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1505 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1506 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1507 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001508 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001509 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001510 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001511 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001512 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001513 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001515 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001516 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001517 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
1519 skb_queue_purge(&hdev->cmd_q);
1520 skb_queue_purge(&hdev->rx_q);
1521
1522 if (hdev->flush)
1523 hdev->flush(hdev);
1524
1525 if (hdev->sent_cmd) {
1526 kfree_skb(hdev->sent_cmd);
1527 hdev->sent_cmd = NULL;
1528 }
1529
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001530 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001531 hci_notify(hdev, HCI_DEV_CLOSE);
1532
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001534 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 }
1536
1537done:
1538 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 return ret;
1540}
1541
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001542/* ---- HCI ioctl helpers ---- */
1543
/* HCIDEVUP ioctl helper: resolve the device id, apply legacy-open
 * policy (unconfigured devices, auto-off cancellation, bondable bit)
 * and then call hci_dev_do_open.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -EOPNOTSUPP or
 * whatever hci_dev_do_open returns).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1598
Johan Hedbergd7347f32014-07-04 12:37:23 +03001599/* This function requires the caller holds hdev->lock */
1600static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1601{
1602 struct hci_conn_params *p;
1603
Johan Hedbergf161dd42014-08-15 21:06:54 +03001604 list_for_each_entry(p, &hdev->le_conn_params, list) {
1605 if (p->conn) {
1606 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001607 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001608 p->conn = NULL;
1609 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001610 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001611 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001612
1613 BT_DBG("All LE pending actions cleared");
1614}
1615
Simon Fels6b3cc1d2015-09-02 12:10:12 +02001616int hci_dev_do_close(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617{
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001618 bool auto_off;
1619
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 BT_DBG("%s %p", hdev->name, hdev);
1621
Gabriele Mazzottad24d8142015-04-26 20:51:50 +02001622 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
Loic Poulain867146a2015-06-09 11:46:30 +02001623 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Gabriele Mazzottad24d8142015-04-26 20:51:50 +02001624 test_bit(HCI_UP, &hdev->flags)) {
Tedd Ho-Jeong Ana44fecb2015-02-13 09:20:50 -08001625 /* Execute vendor specific shutdown routine */
1626 if (hdev->shutdown)
1627 hdev->shutdown(hdev);
1628 }
1629
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001630 cancel_delayed_work(&hdev->power_off);
1631
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 hci_req_cancel(hdev, ENODEV);
1633 hci_req_lock(hdev);
1634
1635 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001636 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 hci_req_unlock(hdev);
1638 return 0;
1639 }
1640
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001641 /* Flush RX and TX works */
1642 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001643 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001645 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001646 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001647 hdev->discov_timeout = 0;
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001648 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1649 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001650 }
1651
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001652 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
Johan Hedberg7d785252011-12-15 00:47:39 +02001653 cancel_delayed_work(&hdev->service_cache);
1654
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001655 cancel_delayed_work_sync(&hdev->le_scan_disable);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08001656 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedberg4518bb02014-02-24 20:35:07 +02001657
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001658 if (hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg4518bb02014-02-24 20:35:07 +02001659 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001660
Florian Grandel5d900e42015-06-18 03:16:35 +02001661 if (hdev->adv_instance_timeout) {
1662 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1663 hdev->adv_instance_timeout = 0;
1664 }
1665
Johan Hedberg76727c02014-11-18 09:00:14 +02001666 /* Avoid potential lockdep warnings from the *_flush() calls by
1667 * ensuring the workqueue is empty up front.
1668 */
1669 drain_workqueue(hdev->workqueue);
1670
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001671 hci_dev_lock(hdev);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001672
Johan Hedberg8f502f82015-01-28 19:56:02 +02001673 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1674
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001675 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1676
1677 if (!auto_off && hdev->dev_type == HCI_BREDR)
1678 mgmt_powered(hdev, 0);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001679
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001680 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03001681 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001682 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001683 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684
Marcel Holtmann64dae962015-01-28 14:10:28 -08001685 smp_unregister(hdev);
1686
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 hci_notify(hdev, HCI_DEV_DOWN);
1688
1689 if (hdev->flush)
1690 hdev->flush(hdev);
1691
1692 /* Reset device */
1693 skb_queue_purge(&hdev->cmd_q);
1694 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001695 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1696 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001698 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 clear_bit(HCI_INIT, &hdev->flags);
1700 }
1701
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001702 /* flush cmd work */
1703 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
1705 /* Drop queues */
1706 skb_queue_purge(&hdev->rx_q);
1707 skb_queue_purge(&hdev->cmd_q);
1708 skb_queue_purge(&hdev->raw_q);
1709
1710 /* Drop last sent command */
1711 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001712 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 kfree_skb(hdev->sent_cmd);
1714 hdev->sent_cmd = NULL;
1715 }
1716
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001717 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001718 hci_notify(hdev, HCI_DEV_CLOSE);
1719
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 /* After this point our queues are empty
1721 * and no tasks are scheduled. */
1722 hdev->close(hdev);
1723
Johan Hedberg35b973c2013-03-15 17:06:59 -05001724 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001725 hdev->flags &= BIT(HCI_RAW);
Marcel Holtmanneacb44d2015-03-13 09:04:17 -07001726 hci_dev_clear_volatile_flags(hdev);
Johan Hedberg35b973c2013-03-15 17:06:59 -05001727
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001728 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001729 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001730
Johan Hedberge59fda82012-02-22 18:11:53 +02001731 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001732 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001733 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02001734
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 hci_req_unlock(hdev);
1736
1737 hci_dev_put(hdev);
1738 return 0;
1739}
1740
1741int hci_dev_close(__u16 dev)
1742{
1743 struct hci_dev *hdev;
1744 int err;
1745
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001746 hdev = hci_dev_get(dev);
1747 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001749
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001750 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001751 err = -EBUSY;
1752 goto done;
1753 }
1754
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001755 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001756 cancel_delayed_work(&hdev->power_off);
1757
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001759
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001760done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 hci_dev_put(hdev);
1762 return err;
1763}
1764
Marcel Holtmann5c912492015-01-28 11:53:05 -08001765static int hci_dev_do_reset(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766{
Marcel Holtmann5c912492015-01-28 11:53:05 -08001767 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
Marcel Holtmann5c912492015-01-28 11:53:05 -08001769 BT_DBG("%s %p", hdev->name, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
1771 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 /* Drop queues */
1774 skb_queue_purge(&hdev->rx_q);
1775 skb_queue_purge(&hdev->cmd_q);
1776
Johan Hedberg76727c02014-11-18 09:00:14 +02001777 /* Avoid potential lockdep warnings from the *_flush() calls by
1778 * ensuring the workqueue is empty up front.
1779 */
1780 drain_workqueue(hdev->workqueue);
1781
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001782 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001783 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001785 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786
1787 if (hdev->flush)
1788 hdev->flush(hdev);
1789
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001790 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001791 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001793 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 return ret;
1797}
1798
Marcel Holtmann5c912492015-01-28 11:53:05 -08001799int hci_dev_reset(__u16 dev)
1800{
1801 struct hci_dev *hdev;
1802 int err;
1803
1804 hdev = hci_dev_get(dev);
1805 if (!hdev)
1806 return -ENODEV;
1807
1808 if (!test_bit(HCI_UP, &hdev->flags)) {
1809 err = -ENETDOWN;
1810 goto done;
1811 }
1812
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001813 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001814 err = -EBUSY;
1815 goto done;
1816 }
1817
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001818 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001819 err = -EOPNOTSUPP;
1820 goto done;
1821 }
1822
1823 err = hci_dev_do_reset(hdev);
1824
1825done:
1826 hci_dev_put(hdev);
1827 return err;
1828}
1829
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830int hci_dev_reset_stat(__u16 dev)
1831{
1832 struct hci_dev *hdev;
1833 int ret = 0;
1834
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001835 hdev = hci_dev_get(dev);
1836 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 return -ENODEV;
1838
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001839 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001840 ret = -EBUSY;
1841 goto done;
1842 }
1843
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001844 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001845 ret = -EOPNOTSUPP;
1846 goto done;
1847 }
1848
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1850
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001851done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 return ret;
1854}
1855
Johan Hedberg123abc02014-07-10 12:09:07 +03001856static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1857{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001858 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001859
1860 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1861
1862 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001863 conn_changed = !hci_dev_test_and_set_flag(hdev,
1864 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001865 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001866 conn_changed = hci_dev_test_and_clear_flag(hdev,
1867 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001868
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001869 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001870 discov_changed = !hci_dev_test_and_set_flag(hdev,
1871 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001872 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001873 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001874 discov_changed = hci_dev_test_and_clear_flag(hdev,
1875 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001876 }
1877
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001878 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001879 return;
1880
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001881 if (conn_changed || discov_changed) {
1882 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001883 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001884
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001885 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001886 mgmt_update_adv_data(hdev);
1887
Johan Hedberg123abc02014-07-10 12:09:07 +03001888 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001889 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001890}
1891
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892int hci_dev_cmd(unsigned int cmd, void __user *arg)
1893{
1894 struct hci_dev *hdev;
1895 struct hci_dev_req dr;
1896 int err = 0;
1897
1898 if (copy_from_user(&dr, arg, sizeof(dr)))
1899 return -EFAULT;
1900
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001901 hdev = hci_dev_get(dr.dev_id);
1902 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 return -ENODEV;
1904
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001905 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001906 err = -EBUSY;
1907 goto done;
1908 }
1909
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001910 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001911 err = -EOPNOTSUPP;
1912 goto done;
1913 }
1914
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001915 if (hdev->dev_type != HCI_BREDR) {
1916 err = -EOPNOTSUPP;
1917 goto done;
1918 }
1919
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001920 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001921 err = -EOPNOTSUPP;
1922 goto done;
1923 }
1924
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 switch (cmd) {
1926 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001927 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1928 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 break;
1930
1931 case HCISETENCRYPT:
1932 if (!lmp_encrypt_capable(hdev)) {
1933 err = -EOPNOTSUPP;
1934 break;
1935 }
1936
1937 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1938 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001939 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1940 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 if (err)
1942 break;
1943 }
1944
Johan Hedberg01178cd2013-03-05 20:37:41 +02001945 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1946 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 break;
1948
1949 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001950 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1951 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03001952
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001953 /* Ensure that the connectable and discoverable states
1954 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03001955 */
Johan Hedberg123abc02014-07-10 12:09:07 +03001956 if (!err)
1957 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 break;
1959
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001960 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001961 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1962 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001963 break;
1964
1965 case HCISETLINKMODE:
1966 hdev->link_mode = ((__u16) dr.dev_opt) &
1967 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1968 break;
1969
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 case HCISETPTYPE:
1971 hdev->pkt_type = (__u16) dr.dev_opt;
1972 break;
1973
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001975 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1976 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 break;
1978
1979 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001980 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1981 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 break;
1983
1984 default:
1985 err = -EINVAL;
1986 break;
1987 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001988
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001989done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 hci_dev_put(hdev);
1991 return err;
1992}
1993
/* HCIGETDEVLIST ioctl helper: copy id/flags pairs for up to the
 * requested number of registered devices to userspace.
 *
 * Returns 0 on success or -EFAULT/-EINVAL/-ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to at most two pages of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2043
2044int hci_get_dev_info(void __user *arg)
2045{
2046 struct hci_dev *hdev;
2047 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002048 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 int err = 0;
2050
2051 if (copy_from_user(&di, arg, sizeof(di)))
2052 return -EFAULT;
2053
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002054 hdev = hci_dev_get(di.dev_id);
2055 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 return -ENODEV;
2057
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002058 /* When the auto-off is configured it means the transport
2059 * is running, but in that case still indicate that the
2060 * device is actually down.
2061 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002062 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002063 flags = hdev->flags & ~BIT(HCI_UP);
2064 else
2065 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002066
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 strcpy(di.name, hdev->name);
2068 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002069 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002070 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002072 if (lmp_bredr_capable(hdev)) {
2073 di.acl_mtu = hdev->acl_mtu;
2074 di.acl_pkts = hdev->acl_pkts;
2075 di.sco_mtu = hdev->sco_mtu;
2076 di.sco_pkts = hdev->sco_pkts;
2077 } else {
2078 di.acl_mtu = hdev->le_mtu;
2079 di.acl_pkts = hdev->le_pkts;
2080 di.sco_mtu = 0;
2081 di.sco_pkts = 0;
2082 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 di.link_policy = hdev->link_policy;
2084 di.link_mode = hdev->link_mode;
2085
2086 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2087 memcpy(&di.features, &hdev->features, sizeof(di.features));
2088
2089 if (copy_to_user(arg, &di, sizeof(di)))
2090 err = -EFAULT;
2091
2092 hci_dev_put(hdev);
2093
2094 return err;
2095}
2096
2097/* ---- Interface to HCI drivers ---- */
2098
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002099static int hci_rfkill_set_block(void *data, bool blocked)
2100{
2101 struct hci_dev *hdev = data;
2102
2103 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2104
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002105 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002106 return -EBUSY;
2107
Johan Hedberg5e130362013-09-13 08:58:17 +03002108 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002109 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002110 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2111 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002112 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002113 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002114 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002115 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002116
2117 return 0;
2118}
2119
/* rfkill operations registered for each HCI controller */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2123
/* Deferred power-on work: open the controller and complete the mgmt
 * power-on sequence.  The order of the flag checks and transitions
 * below is significant.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		/* Report the failed power-on attempt to mgmt */
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		/* Power back down automatically unless something claims
		 * the device before the timeout fires.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2184
2185static void hci_power_off(struct work_struct *work)
2186{
Johan Hedberg32435532011-11-07 22:16:04 +02002187 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002188 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002189
2190 BT_DBG("%s", hdev->name);
2191
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002192 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002193}
2194
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002195static void hci_error_reset(struct work_struct *work)
2196{
2197 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2198
2199 BT_DBG("%s", hdev->name);
2200
2201 if (hdev->hw_error)
2202 hdev->hw_error(hdev, hdev->hw_error_code);
2203 else
2204 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2205 hdev->hw_error_code);
2206
2207 if (hci_dev_do_close(hdev))
2208 return;
2209
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002210 hci_dev_do_open(hdev);
2211}
2212
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002213static void hci_discov_off(struct work_struct *work)
2214{
2215 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002216
2217 hdev = container_of(work, struct hci_dev, discov_off.work);
2218
2219 BT_DBG("%s", hdev->name);
2220
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002221 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002222}
2223
Florian Grandel5d900e42015-06-18 03:16:35 +02002224static void hci_adv_timeout_expire(struct work_struct *work)
2225{
2226 struct hci_dev *hdev;
2227
2228 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2229
2230 BT_DBG("%s", hdev->name);
2231
2232 mgmt_adv_timeout_expired(hdev);
2233}
2234
Johan Hedberg35f74982014-02-18 17:14:32 +02002235void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002236{
Johan Hedberg48210022013-01-27 00:31:28 +02002237 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002238
Johan Hedberg48210022013-01-27 00:31:28 +02002239 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2240 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002241 kfree(uuid);
2242 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002243}
2244
/* Remove and free all stored BR/EDR link keys.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu()
 * so that concurrent RCU readers (e.g. hci_find_link_key()) never
 * observe a freed entry.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
2254
/* Remove and free all stored SMP Long Term Keys.
 *
 * Deletion uses list_del_rcu()/kfree_rcu() so RCU readers such as
 * hci_find_ltk() stay safe while the list is being emptied.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2264
/* Remove and free all stored Identity Resolving Keys.
 *
 * Deletion uses list_del_rcu()/kfree_rcu() so RCU readers such as
 * hci_find_irk_by_rpa() stay safe while the list is being emptied.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2274
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002275struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2276{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002277 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002278
Johan Hedberg0378b592014-11-19 15:22:22 +02002279 rcu_read_lock();
2280 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2281 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2282 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002283 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002284 }
2285 }
2286 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002287
2288 return NULL;
2289}
2290
/* Decide whether a new BR/EDR link key should be stored persistently
 * or only kept for the lifetime of the connection.  The checks below
 * are ordered by precedence: the first matching rule wins.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2330
Johan Hedberge804d252014-07-16 11:42:28 +03002331static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002332{
Johan Hedberge804d252014-07-16 11:42:28 +03002333 if (type == SMP_LTK)
2334 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002335
Johan Hedberge804d252014-07-16 11:42:28 +03002336 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002337}
2338
/* Look up the Long Term Key for @bdaddr/@addr_type usable in @role.
 *
 * A Secure Connections key matches regardless of the requested role;
 * otherwise the role derived from the key's type must match @role.
 * Returns NULL when no suitable key is stored.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002358
/* Resolve a Resolvable Private Address to its IRK.
 *
 * Two passes over the IRK list: first match against each entry's
 * cached RPA; if that fails, try to cryptographically resolve @rpa
 * with each IRK and, on success, cache it in irk->rpa for the next
 * lookup.  Returns NULL if no IRK resolves the address.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for future lookups */
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2382
2383struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2384 u8 addr_type)
2385{
2386 struct smp_irk *irk;
2387
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002388 /* Identity Address must be public or static random */
2389 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2390 return NULL;
2391
Johan Hedbergadae20c2014-11-13 14:37:48 +02002392 rcu_read_lock();
2393 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002394 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002395 bacmp(bdaddr, &irk->bdaddr) == 0) {
2396 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002397 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002398 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002399 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002400 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002401
2402 return NULL;
2403}
2404
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * Returns the stored entry, or NULL on allocation failure.  If
 * @persistent is non-NULL it is set to whether the key should be
 * kept across power cycles (see hci_persistent_key()).
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the workaround below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2451
Johan Hedbergca9142b2014-02-19 14:57:44 +02002452struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002453 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002454 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002455{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002456 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002457 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002458
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002459 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002460 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002461 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002462 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002463 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002464 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002465 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002466 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002467 }
2468
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002469 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002470 key->bdaddr_type = addr_type;
2471 memcpy(key->val, tk, sizeof(key->val));
2472 key->authenticated = authenticated;
2473 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002474 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002475 key->enc_size = enc_size;
2476 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002477
Johan Hedbergca9142b2014-02-19 14:57:44 +02002478 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002479}
2480
/* Store (or update) the Identity Resolving Key for @bdaddr/@addr_type.
 *
 * The key value and cached RPA are always refreshed; the identity
 * address of an existing entry is left untouched.  Returns the stored
 * entry, or NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
2503
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002504int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2505{
2506 struct link_key *key;
2507
2508 key = hci_find_link_key(hdev, bdaddr);
2509 if (!key)
2510 return -ENOENT;
2511
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002512 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002513
Johan Hedberg0378b592014-11-19 15:22:22 +02002514 list_del_rcu(&key->list);
2515 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002516
2517 return 0;
2518}
2519
/* Delete all Long Term Keys stored for @bdaddr/@bdaddr_type.
 *
 * Uses list_del_rcu()/kfree_rcu() so concurrent RCU readers remain
 * safe.  Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2538
/* Delete the Identity Resolving Key(s) stored for @bdaddr/@addr_type.
 *
 * Uses list_del_rcu()/kfree_rcu() so concurrent RCU readers remain
 * safe.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2553
/* Check whether @bdaddr (of mgmt address @type) has pairing material
 * stored: a link key for BR/EDR, or a Long Term Key for LE.  For LE,
 * an RPA-style address is first mapped to its identity address via a
 * stored IRK before the LTK list is searched.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* If an IRK resolves the address, search using the identity
	 * address instead of the one passed in.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2589
/* HCI command timer function: runs when the controller failed to
 * answer the last HCI command in time.  Logs the stuck opcode (when
 * known), then restores one command credit and kicks the command
 * work queue so queued commands can proceed.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2608
Szymon Janc2763eda2011-03-22 13:12:22 +01002609struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002610 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002611{
2612 struct oob_data *data;
2613
Johan Hedberg6928a922014-10-26 20:46:09 +01002614 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2615 if (bacmp(bdaddr, &data->bdaddr) != 0)
2616 continue;
2617 if (data->bdaddr_type != bdaddr_type)
2618 continue;
2619 return data;
2620 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002621
2622 return NULL;
2623}
2624
Johan Hedberg6928a922014-10-26 20:46:09 +01002625int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2626 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002627{
2628 struct oob_data *data;
2629
Johan Hedberg6928a922014-10-26 20:46:09 +01002630 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002631 if (!data)
2632 return -ENOENT;
2633
Johan Hedberg6928a922014-10-26 20:46:09 +01002634 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002635
2636 list_del(&data->list);
2637 kfree(data);
2638
2639 return 0;
2640}
2641
Johan Hedberg35f74982014-02-18 17:14:32 +02002642void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002643{
2644 struct oob_data *data, *n;
2645
2646 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2647 list_del(&data->list);
2648 kfree(data);
2649 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002650}
2651
/* Store (or update) remote OOB pairing data for @bdaddr/@bdaddr_type.
 *
 * Based on which hash/randomizer pairs are supplied, data->present is
 * set to 0x00 (none), 0x01 (192-bit pair only), 0x02 (256-bit pair
 * only) or 0x03 (both); absent pairs are zeroed out.  Returns 0 on
 * success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2697
Florian Grandeld2609b32015-06-18 03:16:34 +02002698/* This function requires the caller holds hdev->lock */
2699struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2700{
2701 struct adv_info *adv_instance;
2702
2703 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2704 if (adv_instance->instance == instance)
2705 return adv_instance;
2706 }
2707
2708 return NULL;
2709}
2710
2711/* This function requires the caller holds hdev->lock */
2712struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2713 struct adv_info *cur_instance;
2714
2715 cur_instance = hci_find_adv_instance(hdev, instance);
2716 if (!cur_instance)
2717 return NULL;
2718
2719 if (cur_instance == list_last_entry(&hdev->adv_instances,
2720 struct adv_info, list))
2721 return list_first_entry(&hdev->adv_instances,
2722 struct adv_info, list);
2723 else
2724 return list_next_entry(cur_instance, list);
2725}
2726
2727/* This function requires the caller holds hdev->lock */
2728int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2729{
2730 struct adv_info *adv_instance;
2731
2732 adv_instance = hci_find_adv_instance(hdev, instance);
2733 if (!adv_instance)
2734 return -ENOENT;
2735
2736 BT_DBG("%s removing %dMR", hdev->name, instance);
2737
Florian Grandel5d900e42015-06-18 03:16:35 +02002738 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2739 cancel_delayed_work(&hdev->adv_instance_expire);
2740 hdev->adv_instance_timeout = 0;
2741 }
2742
Florian Grandeld2609b32015-06-18 03:16:34 +02002743 list_del(&adv_instance->list);
2744 kfree(adv_instance);
2745
2746 hdev->adv_instance_cnt--;
2747
2748 return 0;
2749}
2750
2751/* This function requires the caller holds hdev->lock */
2752void hci_adv_instances_clear(struct hci_dev *hdev)
2753{
2754 struct adv_info *adv_instance, *n;
2755
Florian Grandel5d900e42015-06-18 03:16:35 +02002756 if (hdev->adv_instance_timeout) {
2757 cancel_delayed_work(&hdev->adv_instance_expire);
2758 hdev->adv_instance_timeout = 0;
2759 }
2760
Florian Grandeld2609b32015-06-18 03:16:34 +02002761 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2762 list_del(&adv_instance->list);
2763 kfree(adv_instance);
2764 }
2765
2766 hdev->adv_instance_cnt = 0;
2767}
2768
2769/* This function requires the caller holds hdev->lock */
2770int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2771 u16 adv_data_len, u8 *adv_data,
2772 u16 scan_rsp_len, u8 *scan_rsp_data,
2773 u16 timeout, u16 duration)
2774{
2775 struct adv_info *adv_instance;
2776
2777 adv_instance = hci_find_adv_instance(hdev, instance);
2778 if (adv_instance) {
2779 memset(adv_instance->adv_data, 0,
2780 sizeof(adv_instance->adv_data));
2781 memset(adv_instance->scan_rsp_data, 0,
2782 sizeof(adv_instance->scan_rsp_data));
2783 } else {
2784 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2785 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2786 return -EOVERFLOW;
2787
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002788 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002789 if (!adv_instance)
2790 return -ENOMEM;
2791
Florian Grandelfffd38b2015-06-18 03:16:47 +02002792 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002793 adv_instance->instance = instance;
2794 list_add(&adv_instance->list, &hdev->adv_instances);
2795 hdev->adv_instance_cnt++;
2796 }
2797
2798 adv_instance->flags = flags;
2799 adv_instance->adv_data_len = adv_data_len;
2800 adv_instance->scan_rsp_len = scan_rsp_len;
2801
2802 if (adv_data_len)
2803 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2804
2805 if (scan_rsp_len)
2806 memcpy(adv_instance->scan_rsp_data,
2807 scan_rsp_data, scan_rsp_len);
2808
2809 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002810 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002811
2812 if (duration == 0)
2813 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2814 else
2815 adv_instance->duration = duration;
2816
2817 BT_DBG("%s for %dMR", hdev->name, instance);
2818
2819 return 0;
2820}
2821
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002822struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002823 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002824{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002825 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002826
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002827 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002828 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002829 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002830 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002831
2832 return NULL;
2833}
2834
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002835void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002836{
2837 struct list_head *p, *n;
2838
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002839 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002840 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002841
2842 list_del(p);
2843 kfree(b);
2844 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002845}
2846
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002847int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002848{
2849 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002850
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002851 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002852 return -EBADF;
2853
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002854 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002855 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002856
Johan Hedberg27f70f32014-07-21 10:50:06 +03002857 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002858 if (!entry)
2859 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002860
2861 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002862 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002863
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002864 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002865
2866 return 0;
2867}
2868
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002869int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002870{
2871 struct bdaddr_list *entry;
2872
Johan Hedberg35f74982014-02-18 17:14:32 +02002873 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002874 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002875 return 0;
2876 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002877
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002878 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002879 if (!entry)
2880 return -ENOENT;
2881
2882 list_del(&entry->list);
2883 kfree(entry);
2884
2885 return 0;
2886}
2887
Andre Guedes15819a72014-02-03 13:56:18 -03002888/* This function requires the caller holds hdev->lock */
2889struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2890 bdaddr_t *addr, u8 addr_type)
2891{
2892 struct hci_conn_params *params;
2893
2894 list_for_each_entry(params, &hdev->le_conn_params, list) {
2895 if (bacmp(&params->addr, addr) == 0 &&
2896 params->addr_type == addr_type) {
2897 return params;
2898 }
2899 }
2900
2901 return NULL;
2902}
2903
2904/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002905struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2906 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002907{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002908 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002909
Johan Hedberg501f8822014-07-04 12:37:26 +03002910 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002911 if (bacmp(&param->addr, addr) == 0 &&
2912 param->addr_type == addr_type)
2913 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002914 }
2915
2916 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002917}
2918
2919/* This function requires the caller holds hdev->lock */
Jakub Pawlowskif75113a2015-08-07 20:22:53 +02002920struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2921 bdaddr_t *addr,
2922 u8 addr_type)
2923{
2924 struct hci_conn_params *param;
2925
2926 list_for_each_entry(param, &hdev->pend_le_conns, action) {
2927 if (bacmp(&param->addr, addr) == 0 &&
2928 param->addr_type == addr_type &&
2929 param->explicit_connect)
2930 return param;
2931 }
2932
2933 list_for_each_entry(param, &hdev->pend_le_reports, action) {
2934 if (bacmp(&param->addr, addr) == 0 &&
2935 param->addr_type == addr_type &&
2936 param->explicit_connect)
2937 return param;
2938 }
2939
2940 return NULL;
2941}
2942
2943/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002944struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2945 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002946{
2947 struct hci_conn_params *params;
2948
2949 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002950 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002951 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002952
2953 params = kzalloc(sizeof(*params), GFP_KERNEL);
2954 if (!params) {
2955 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002956 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002957 }
2958
2959 bacpy(&params->addr, addr);
2960 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002961
2962 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002963 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002964
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002965 params->conn_min_interval = hdev->le_conn_min_interval;
2966 params->conn_max_interval = hdev->le_conn_max_interval;
2967 params->conn_latency = hdev->le_conn_latency;
2968 params->supervision_timeout = hdev->le_supv_timeout;
2969 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2970
2971 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2972
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002973 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002974}
2975
/* Release a connection parameter entry: drop the connection reference it
 * may hold, unlink it from both the action list and the main le_conn_params
 * list, and free the memory.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		/* Release both the usage and the lifetime reference */
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2987
/* This function requires the caller holds hdev->lock.
 *
 * Remove and free the connection parameter entry for the given address,
 * then re-evaluate background scanning since the set of devices to scan
 * for may have changed. A missing entry is silently ignored.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
3003
3004/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003005void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003006{
3007 struct hci_conn_params *params, *tmp;
3008
3009 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03003010 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3011 continue;
Jakub Pawlowskif75113a2015-08-07 20:22:53 +02003012
3013 /* If trying to estabilish one time connection to disabled
3014 * device, leave the params, but mark them as just once.
3015 */
3016 if (params->explicit_connect) {
3017 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3018 continue;
3019 }
3020
Andre Guedes15819a72014-02-03 13:56:18 -03003021 list_del(&params->list);
3022 kfree(params);
3023 }
3024
Johan Hedberg55af49a2014-07-02 17:37:26 +03003025 BT_DBG("All LE disabled connection parameters were removed");
3026}
3027
3028/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003029void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003030{
3031 struct hci_conn_params *params, *tmp;
3032
Johan Hedbergf6c63242014-08-15 21:06:59 +03003033 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3034 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003035
Johan Hedberga2f41a82014-07-04 12:37:19 +03003036 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003037
Andre Guedes15819a72014-02-03 13:56:18 -03003038 BT_DBG("All LE connection parameters were removed");
3039}
3040
Marcel Holtmann1904a852015-01-11 13:50:44 -08003041static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003042{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003043 if (status) {
3044 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003045
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003046 hci_dev_lock(hdev);
3047 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3048 hci_dev_unlock(hdev);
3049 return;
3050 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003051}
3052
/* Completion callback for the LE-scan-disable request queued by
 * le_scan_disable_work(). Depending on the discovery type, either marks
 * discovery as stopped (LE-only) or continues an interleaved discovery
 * with a BR/EDR inquiry.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	/* Scan is no longer running, so clear its start timestamp */
	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished. If we will resolve
			 * remote device name, do not change discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			/* Controller cannot scan and inquire at the same
			 * time, so start the BR/EDR inquiry phase now.
			 */
			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3115
/* Delayed work that turns off LE scanning once the configured scan
 * duration has elapsed. Cancels any pending scan-restart work first so
 * the two jobs cannot race; the rest of the handling happens in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Make sure a concurrent restart cannot re-enable the scan */
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3135
/* Completion callback for the scan-restart request. Re-arms the
 * le_scan_disable delayed work (which was canceled for the restart) so
 * the overall scan still ends at the originally scheduled time.
 */
static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	/* Re-arming is only needed for controllers with the strict
	 * duplicate filter quirk, and only while a scan is tracked.
	 */
	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		/* NOTE(review): this branch tries to handle jiffies
		 * wrap-around explicitly; elapsed is assumed to fit in
		 * an int here — confirm for very long durations.
		 */
		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		/* Duration already exceeded; disable the scan immediately */
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}
3175
/* Delayed work that briefly disables and re-enables LE scanning in one
 * HCI request. Used to reset the controller's duplicate filter on
 * controllers with the strict duplicate filter quirk.
 */
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	/* Disable and re-enable are queued in the same request so they
	 * execute back to back on the controller.
	 */
	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}
3203
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	/* Use the static random address when: forced via debugfs, no
	 * public address is set, or BR/EDR is disabled while a static
	 * address has been configured.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
3231
/* Alloc HCI device.
 *
 * Allocates and initializes a struct hci_dev with default packet types,
 * default LE parameters, empty lists, work items and queues. The caller
 * fills in the transport callbacks and registers it with
 * hci_register_dev(); free with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE advertising, scanning and connection parameters */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3321
/* Free HCI device.
 *
 * Drops the device reference; the actual memory is released by the
 * device's release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3329
/* Register HCI device.
 *
 * Validates the mandatory transport callbacks, assigns a free index,
 * creates the work queues, sysfs/debugfs entries and rfkill hook, adds
 * the device to the global list and schedules power-on. Returns the
 * assigned id on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		/* rfkill is best-effort; registration failure just means
		 * no rfkill integration for this controller.
		 */
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3433
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * closes it, notifies mgmt, tears down rfkill/sysfs/debugfs/workqueues,
 * clears all per-device state lists and releases the index and the
 * reference taken at registration.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt for controllers that finished
	 * setup/config; those still in setup were never announced.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drop all remaining per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3498
/* Suspend HCI device.
 *
 * Only notifies registered listeners of the suspend event; no driver
 * state is changed here.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3506
/* Resume HCI device.
 *
 * Only notifies registered listeners of the resume event; no driver
 * state is changed here.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3514
Marcel Holtmann75e05692014-11-02 08:15:38 +01003515/* Reset HCI device */
3516int hci_reset_dev(struct hci_dev *hdev)
3517{
3518 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3519 struct sk_buff *skb;
3520
3521 skb = bt_skb_alloc(3, GFP_ATOMIC);
3522 if (!skb)
3523 return -ENOMEM;
3524
3525 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3526 memcpy(skb_put(skb, 3), hw_err, 3);
3527
3528 /* Send Hardware Error to upper stack */
3529 return hci_recv_frame(hdev, skb);
3530}
3531EXPORT_SYMBOL(hci_reset_dev);
3532
Marcel Holtmann76bca882009-11-18 00:40:39 +01003533/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003534int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003535{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003536 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003537 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003538 kfree_skb(skb);
3539 return -ENXIO;
3540 }
3541
Marcel Holtmannfe806dc2015-10-08 03:14:28 +02003542 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3543 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3544 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3545 kfree_skb(skb);
3546 return -EINVAL;
3547 }
3548
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003549 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003550 bt_cb(skb)->incoming = 1;
3551
3552 /* Time stamp */
3553 __net_timestamp(skb);
3554
Marcel Holtmann76bca882009-11-18 00:40:39 +01003555 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003556 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003557
Marcel Holtmann76bca882009-11-18 00:40:39 +01003558 return 0;
3559}
3560EXPORT_SYMBOL(hci_recv_frame);
3561
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003562/* Receive diagnostic message from HCI drivers */
3563int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3564{
3565 /* Time stamp */
3566 __net_timestamp(skb);
3567
3568 /* Mark as diagnostic packet and send to monitor */
3569 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3570 hci_send_to_monitor(hdev, skb);
3571
3572 kfree_skb(skb);
3573 return 0;
3574}
3575EXPORT_SYMBOL(hci_recv_diag);
3576
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577/* ---- Interface to upper protocols ---- */
3578
/* Register an upper-protocol callback block.
 *
 * Appended under hci_cb_list_lock, so callbacks run in registration
 * order. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3590
/* Unregister a previously registered upper-protocol callback block.
 *
 * Removal happens under hci_cb_list_lock. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3602
/* Hand an outgoing frame to the driver.
 *
 * The frame is time stamped, mirrored to the monitor channel and (in
 * promiscuous mode) to the raw sockets before being passed to the
 * driver's send callback. The skb is consumed in all cases: queued by
 * the driver, or freed when the device is not running or the driver
 * rejects it.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop the frame if the device is not running anymore */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
3634
/* Send HCI command
 *
 * Builds a command skb via hci_prepare_cmd() and queues it on the
 * command queue for the cmd work to transmit. Returns 0 on success or
 * -ENOMEM when the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003659
3660/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003661void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662{
3663 struct hci_command_hdr *hdr;
3664
3665 if (!hdev->sent_cmd)
3666 return NULL;
3667
3668 hdr = (void *) hdev->sent_cmd->data;
3669
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003670 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671 return NULL;
3672
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003673 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674
3675 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3676}
3677
Loic Poulainfbef1682015-09-29 15:05:44 +02003678/* Send HCI command and wait for command commplete event */
3679struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3680 const void *param, u32 timeout)
3681{
3682 struct sk_buff *skb;
3683
3684 if (!test_bit(HCI_UP, &hdev->flags))
3685 return ERR_PTR(-ENETDOWN);
3686
3687 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3688
3689 hci_req_lock(hdev);
3690 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3691 hci_req_unlock(hdev);
3692
3693 return skb;
3694}
3695EXPORT_SYMBOL(hci_cmd_sync);
3696
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697/* Send ACL data */
3698static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3699{
3700 struct hci_acl_hdr *hdr;
3701 int len = skb->len;
3702
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003703 skb_push(skb, HCI_ACL_HDR_SIZE);
3704 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003705 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003706 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3707 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708}
3709
/* Add ACL headers and queue an outgoing ACL packet (and any fragments
 * hanging off its frag_list) on the given channel queue.
 *
 * For BR/EDR the connection handle is used, for AMP the channel
 * handle. The first fragment keeps the caller's flags; continuation
 * fragments are re-flagged ACL_CONT. On an unknown dev_type the skb
 * is neither queued nor freed, only an error is logged.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* The head skb is sent on its own; drop its paged/frag length */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as
		 * an independent skb below.
		 */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments always carry ACL_CONT */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3771
/* Queue an outgoing ACL packet on the channel's data queue and kick
 * the TX work to get it scheduled.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782
/* Send SCO data
 *
 * Prepends the SCO header (handle + payload length) and queues the
 * packet on the connection's data queue for the TX work to transmit.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Capture the payload length before pushing the header */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803
3804/* ---- HCI TX task (outgoing data) ---- */
3805
/* HCI Connection scheduler
 *
 * Pick the connection of the given link type with queued data and the
 * fewest outstanding packets (simple fairness), and compute its TX
 * quota: the available controller credits divided evenly among the
 * eligible connections, at least 1. *quote is set to 0 and NULL is
 * returned when nothing is eligible.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Fewest packets in flight wins */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen, stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Credits depend on the link type; LE falls back to the
		 * ACL pool when the controller has no dedicated LE
		 * buffers (le_mtu == 0).
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3866
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003867static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003868{
3869 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003870 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871
Ville Tervobae1f5d92011-02-10 22:38:53 -03003872 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003874 rcu_read_lock();
3875
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003877 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003878 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003879 BT_ERR("%s killing stalled connection %pMR",
3880 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003881 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882 }
3883 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003884
3885 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886}
3887
/* Channel scheduler: pick the next HCI channel to service.
 *
 * Among all connections of the given type, consider only channels
 * whose head-of-queue skb has the highest priority seen so far; of
 * those, choose the one on the connection with the fewest outstanding
 * packets. *quote receives the credit share for that channel (at
 * least 1). Returns NULL when no channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-loaded connection wins at equal priority */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Credit pool depends on the link type of the chosen channel */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3969
/* Anti-starvation pass after a scheduling round.
 *
 * For every connection of the given type: channels that got to send
 * have their per-round counter reset; channels that were skipped but
 * still have queued data get their head skb promoted to
 * HCI_PRIO_MAX - 1 so they win the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round: reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion priority */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4019
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004020static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4021{
4022 /* Calculate count of blocks used by this packet */
4023 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4024}
4025
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004026static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004028 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029 /* ACL tx timeout must be longer than maximum
4030 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004031 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004032 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004033 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004034 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004035}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036
/* Packet-based ACL scheduler: drain channel queues while ACL credits
 * remain, one fairness quote at a time. A channel's burst stops early
 * when a lower-priority skb reaches the head of its queue. Runs the
 * anti-starvation pass if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: promote starved channels for next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4074
/* Block-based ACL scheduler (AMP-style flow control): like
 * hci_sched_acl_pkt() but credits are counted in data blocks, and a
 * packet needing more blocks than remain aborts the round.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links, everything else ACL */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Not enough blocks left for this packet */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4128
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004129static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004130{
4131 BT_DBG("%s", hdev->name);
4132
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004133 /* No ACL link over BR/EDR controller */
4134 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4135 return;
4136
4137 /* No AMP link over AMP controller */
4138 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004139 return;
4140
4141 switch (hdev->flow_ctl_mode) {
4142 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4143 hci_sched_acl_pkt(hdev);
4144 break;
4145
4146 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4147 hci_sched_acl_blk(hdev);
4148 break;
4149 }
4150}
4151
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004153static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154{
4155 struct hci_conn *conn;
4156 struct sk_buff *skb;
4157 int quote;
4158
4159 BT_DBG("%s", hdev->name);
4160
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004161 if (!hci_conn_num(hdev, SCO_LINK))
4162 return;
4163
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4165 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4166 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004167 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168
4169 conn->sent++;
4170 if (conn->sent == ~0)
4171 conn->sent = 0;
4172 }
4173 }
4174}
4175
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004176static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004177{
4178 struct hci_conn *conn;
4179 struct sk_buff *skb;
4180 int quote;
4181
4182 BT_DBG("%s", hdev->name);
4183
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004184 if (!hci_conn_num(hdev, ESCO_LINK))
4185 return;
4186
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004187 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4188 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004189 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4190 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004191 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004192
4193 conn->sent++;
4194 if (conn->sent == ~0)
4195 conn->sent = 0;
4196 }
4197 }
4198}
4199
/* LE scheduler: like the ACL packet scheduler but drawing from the LE
 * credit pool, or the ACL pool when the controller has no dedicated
 * LE buffers (le_pkts == 0). Writes the remaining credits back to the
 * pool it borrowed from.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool and remember the starting balance */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused credits to whichever pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4250
/* TX work: run all per-link-type schedulers (unless the device is in
 * user-channel mode, where userspace owns the traffic) and then flush
 * the raw queue directly to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4271
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004272/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273
4274/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004275static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276{
4277 struct hci_acl_hdr *hdr = (void *) skb->data;
4278 struct hci_conn *conn;
4279 __u16 handle, flags;
4280
4281 skb_pull(skb, HCI_ACL_HDR_SIZE);
4282
4283 handle = __le16_to_cpu(hdr->handle);
4284 flags = hci_flags(handle);
4285 handle = hci_handle(handle);
4286
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004287 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004288 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004289
4290 hdev->stat.acl_rx++;
4291
4292 hci_dev_lock(hdev);
4293 conn = hci_conn_hash_lookup_handle(hdev, handle);
4294 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004295
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004297 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004298
Linus Torvalds1da177e2005-04-16 15:20:36 -07004299 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004300 l2cap_recv_acldata(conn, skb, flags);
4301 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004303 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004304 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305 }
4306
4307 kfree_skb(skb);
4308}
4309
4310/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004311static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312{
4313 struct hci_sco_hdr *hdr = (void *) skb->data;
4314 struct hci_conn *conn;
4315 __u16 handle;
4316
4317 skb_pull(skb, HCI_SCO_HDR_SIZE);
4318
4319 handle = __le16_to_cpu(hdr->handle);
4320
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004321 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322
4323 hdev->stat.sco_rx++;
4324
4325 hci_dev_lock(hdev);
4326 conn = hci_conn_hash_lookup_handle(hdev, handle);
4327 hci_dev_unlock(hdev);
4328
4329 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004331 sco_recv_scodata(conn, skb);
4332 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004334 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004335 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336 }
4337
4338 kfree_skb(skb);
4339}
4340
Johan Hedberg9238f362013-03-05 20:37:48 +02004341static bool hci_req_is_complete(struct hci_dev *hdev)
4342{
4343 struct sk_buff *skb;
4344
4345 skb = skb_peek(&hdev->cmd_q);
4346 if (!skb)
4347 return true;
4348
Johan Hedbergdb6e3e82015-03-30 23:21:02 +03004349 return bt_cb(skb)->req.start;
Johan Hedberg9238f362013-03-05 20:37:48 +02004350}
4351
Johan Hedberg42c6b122013-03-05 20:37:49 +02004352static void hci_resend_last(struct hci_dev *hdev)
4353{
4354 struct hci_command_hdr *sent;
4355 struct sk_buff *skb;
4356 u16 opcode;
4357
4358 if (!hdev->sent_cmd)
4359 return;
4360
4361 sent = (void *) hdev->sent_cmd->data;
4362 opcode = __le16_to_cpu(sent->opcode);
4363 if (opcode == HCI_OP_RESET)
4364 return;
4365
4366 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4367 if (!skb)
4368 return;
4369
4370 skb_queue_head(&hdev->cmd_q, skb);
4371 queue_work(hdev->workqueue, &hdev->cmd_work);
4372}
4373
Johan Hedberge62144872015-04-02 13:41:08 +03004374void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4375 hci_req_complete_t *req_complete,
4376 hci_req_complete_skb_t *req_complete_skb)
Johan Hedberg9238f362013-03-05 20:37:48 +02004377{
Johan Hedberg9238f362013-03-05 20:37:48 +02004378 struct sk_buff *skb;
4379 unsigned long flags;
4380
4381 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4382
Johan Hedberg42c6b122013-03-05 20:37:49 +02004383 /* If the completed command doesn't match the last one that was
4384 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004385 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004386 if (!hci_sent_cmd_data(hdev, opcode)) {
4387 /* Some CSR based controllers generate a spontaneous
4388 * reset complete event during init and any pending
4389 * command will never be completed. In such a case we
4390 * need to resend whatever was the last sent
4391 * command.
4392 */
4393 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4394 hci_resend_last(hdev);
4395
Johan Hedberg9238f362013-03-05 20:37:48 +02004396 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004397 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004398
4399 /* If the command succeeded and there's still more commands in
4400 * this request the request is not yet complete.
4401 */
4402 if (!status && !hci_req_is_complete(hdev))
4403 return;
4404
4405 /* If this was the last command in a request the complete
4406 * callback would be found in hdev->sent_cmd instead of the
4407 * command queue (hdev->cmd_q).
4408 */
Johan Hedberge62144872015-04-02 13:41:08 +03004409 if (bt_cb(hdev->sent_cmd)->req.complete) {
4410 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4411 return;
4412 }
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004413
Johan Hedberge62144872015-04-02 13:41:08 +03004414 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4415 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4416 return;
Johan Hedberg9238f362013-03-05 20:37:48 +02004417 }
4418
4419 /* Remove all pending commands belonging to this request */
4420 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4421 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
Johan Hedbergdb6e3e82015-03-30 23:21:02 +03004422 if (bt_cb(skb)->req.start) {
Johan Hedberg9238f362013-03-05 20:37:48 +02004423 __skb_queue_head(&hdev->cmd_q, skb);
4424 break;
4425 }
4426
Johan Hedberge62144872015-04-02 13:41:08 +03004427 *req_complete = bt_cb(skb)->req.complete;
4428 *req_complete_skb = bt_cb(skb)->req.complete_skb;
Johan Hedberg9238f362013-03-05 20:37:48 +02004429 kfree_skb(skb);
4430 }
4431 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
Johan Hedberg9238f362013-03-05 20:37:48 +02004432}
4433
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004434static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004436 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437 struct sk_buff *skb;
4438
4439 BT_DBG("%s", hdev->name);
4440
Linus Torvalds1da177e2005-04-16 15:20:36 -07004441 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004442 /* Send copy to monitor */
4443 hci_send_to_monitor(hdev, skb);
4444
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445 if (atomic_read(&hdev->promisc)) {
4446 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004447 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004448 }
4449
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004450 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451 kfree_skb(skb);
4452 continue;
4453 }
4454
4455 if (test_bit(HCI_INIT, &hdev->flags)) {
4456 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004457 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458 case HCI_ACLDATA_PKT:
4459 case HCI_SCODATA_PKT:
4460 kfree_skb(skb);
4461 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004462 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463 }
4464
4465 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004466 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004468 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469 hci_event_packet(hdev, skb);
4470 break;
4471
4472 case HCI_ACLDATA_PKT:
4473 BT_DBG("%s ACL data packet", hdev->name);
4474 hci_acldata_packet(hdev, skb);
4475 break;
4476
4477 case HCI_SCODATA_PKT:
4478 BT_DBG("%s SCO data packet", hdev->name);
4479 hci_scodata_packet(hdev, skb);
4480 break;
4481
4482 default:
4483 kfree_skb(skb);
4484 break;
4485 }
4486 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487}
4488
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004489static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004491 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492 struct sk_buff *skb;
4493
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004494 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4495 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004498 if (atomic_read(&hdev->cmd_cnt)) {
4499 skb = skb_dequeue(&hdev->cmd_q);
4500 if (!skb)
4501 return;
4502
Wei Yongjun7585b972009-02-25 18:29:52 +08004503 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004505 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004506 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004508 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004509 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004510 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004511 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004512 schedule_delayed_work(&hdev->cmd_timer,
4513 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514 } else {
4515 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004516 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517 }
4518 }
4519}