/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
43
Marcel Holtmannb78752c2010-08-08 23:06:53 -040044static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020045static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020046static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
Linus Torvalds1da177e2005-04-16 15:20:36 -070048/* HCI device list */
49LIST_HEAD(hci_dev_list);
50DEFINE_RWLOCK(hci_dev_list_lock);
51
52/* HCI callback list */
53LIST_HEAD(hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +020054DEFINE_MUTEX(hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Sasha Levin3df92b32012-05-27 22:36:56 +020056/* HCI ID Numbering */
57static DEFINE_IDA(hci_index_ida);
58
Marcel Holtmann899de762014-07-11 05:51:58 +020059/* ----- HCI requests ----- */
60
61#define HCI_REQ_DONE 0
62#define HCI_REQ_PEND 1
63#define HCI_REQ_CANCELED 2
64
65#define hci_req_lock(d) mutex_lock(&d->req_lock)
66#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
Marcel Holtmann65164552005-10-28 19:20:48 +020070static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070071{
Marcel Holtmann040030e2012-02-20 14:50:37 +010072 hci_sock_dev_event(hdev, event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070073}
74
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070075/* ---- HCI debugfs entries ---- */
76
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070077static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -070083 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070084 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87}
88
89static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
91{
92 struct hci_dev *hdev = file->private_data;
93 struct sk_buff *skb;
94 char buf[32];
95 size_t buf_size = min(count, (sizeof(buf)-1));
96 bool enable;
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070097
98 if (!test_bit(HCI_UP, &hdev->flags))
99 return -ENETDOWN;
100
101 if (copy_from_user(buf, user_buf, buf_size))
102 return -EFAULT;
103
104 buf[buf_size] = '\0';
105 if (strtobool(buf, &enable))
106 return -EINVAL;
107
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -0700108 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700109 return -EALREADY;
110
111 hci_req_lock(hdev);
112 if (enable)
113 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
114 HCI_CMD_TIMEOUT);
115 else
116 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 HCI_CMD_TIMEOUT);
118 hci_req_unlock(hdev);
119
120 if (IS_ERR(skb))
121 return PTR_ERR(skb);
122
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700123 kfree_skb(skb);
124
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -0700125 hci_dev_change_flag(hdev, HCI_DUT_MODE);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700126
127 return count;
128}
129
130static const struct file_operations dut_mode_fops = {
131 .open = simple_open,
132 .read = dut_mode_read,
133 .write = dut_mode_write,
134 .llseek = default_llseek,
135};
136
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200137static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
138 size_t count, loff_t *ppos)
139{
140 struct hci_dev *hdev = file->private_data;
141 char buf[3];
142
143 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
144 buf[1] = '\n';
145 buf[2] = '\0';
146 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
147}
148
149static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
150 size_t count, loff_t *ppos)
151{
152 struct hci_dev *hdev = file->private_data;
153 char buf[32];
154 size_t buf_size = min(count, (sizeof(buf)-1));
155 bool enable;
156 int err;
157
158 if (copy_from_user(buf, user_buf, buf_size))
159 return -EFAULT;
160
161 buf[buf_size] = '\0';
162 if (strtobool(buf, &enable))
163 return -EINVAL;
164
165 hci_req_lock(hdev);
166 err = hdev->set_diag(hdev, enable);
167 hci_req_unlock(hdev);
168
169 if (err < 0)
170 return err;
171
172 if (enable)
173 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
174 else
175 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
176
177 return count;
178}
179
180static const struct file_operations vendor_diag_fops = {
181 .open = simple_open,
182 .read = vendor_diag_read,
183 .write = vendor_diag_write,
184 .llseek = default_llseek,
185};
186
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187/* ---- HCI requests ---- */
188
Johan Hedbergf60cb302015-04-02 13:41:09 +0300189static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
190 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200192 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193
194 if (hdev->req_status == HCI_REQ_PEND) {
195 hdev->req_result = result;
196 hdev->req_status = HCI_REQ_DONE;
Johan Hedbergf60cb302015-04-02 13:41:09 +0300197 if (skb)
198 hdev->req_skb = skb_get(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199 wake_up_interruptible(&hdev->req_wait_q);
200 }
201}
202
203static void hci_req_cancel(struct hci_dev *hdev, int err)
204{
205 BT_DBG("%s err 0x%2.2x", hdev->name, err);
206
207 if (hdev->req_status == HCI_REQ_PEND) {
208 hdev->req_result = err;
209 hdev->req_status = HCI_REQ_CANCELED;
210 wake_up_interruptible(&hdev->req_wait_q);
211 }
212}
213
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300214struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300215 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300216{
217 DECLARE_WAITQUEUE(wait, current);
218 struct hci_request req;
Johan Hedbergf60cb302015-04-02 13:41:09 +0300219 struct sk_buff *skb;
Johan Hedberg75e84b72013-04-02 13:35:04 +0300220 int err = 0;
221
222 BT_DBG("%s", hdev->name);
223
224 hci_req_init(&req, hdev);
225
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300226 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300227
228 hdev->req_status = HCI_REQ_PEND;
229
Johan Hedberg75e84b72013-04-02 13:35:04 +0300230 add_wait_queue(&hdev->req_wait_q, &wait);
231 set_current_state(TASK_INTERRUPTIBLE);
232
Johan Hedbergf60cb302015-04-02 13:41:09 +0300233 err = hci_req_run_skb(&req, hci_req_sync_complete);
Chan-yeol Park039fada2014-10-31 14:23:06 +0900234 if (err < 0) {
235 remove_wait_queue(&hdev->req_wait_q, &wait);
Johan Hedberg22a3cea2014-11-19 13:16:41 +0200236 set_current_state(TASK_RUNNING);
Chan-yeol Park039fada2014-10-31 14:23:06 +0900237 return ERR_PTR(err);
238 }
239
Johan Hedberg75e84b72013-04-02 13:35:04 +0300240 schedule_timeout(timeout);
241
242 remove_wait_queue(&hdev->req_wait_q, &wait);
243
244 if (signal_pending(current))
245 return ERR_PTR(-EINTR);
246
247 switch (hdev->req_status) {
248 case HCI_REQ_DONE:
249 err = -bt_to_errno(hdev->req_result);
250 break;
251
252 case HCI_REQ_CANCELED:
253 err = -hdev->req_result;
254 break;
255
256 default:
257 err = -ETIMEDOUT;
258 break;
259 }
260
261 hdev->req_status = hdev->req_result = 0;
Johan Hedbergf60cb302015-04-02 13:41:09 +0300262 skb = hdev->req_skb;
263 hdev->req_skb = NULL;
Johan Hedberg75e84b72013-04-02 13:35:04 +0300264
265 BT_DBG("%s end: err %d", hdev->name, err);
266
Johan Hedbergf60cb302015-04-02 13:41:09 +0300267 if (err < 0) {
268 kfree_skb(skb);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300269 return ERR_PTR(err);
Johan Hedbergf60cb302015-04-02 13:41:09 +0300270 }
Johan Hedberg75e84b72013-04-02 13:35:04 +0300271
Johan Hedberg757aa0b2015-04-02 13:41:12 +0300272 if (!skb)
273 return ERR_PTR(-ENODATA);
274
275 return skb;
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300276}
277EXPORT_SYMBOL(__hci_cmd_sync_ev);
278
279struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300280 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300281{
282 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300283}
284EXPORT_SYMBOL(__hci_cmd_sync);
285
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200287static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200288 void (*func)(struct hci_request *req,
289 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200290 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700291{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200292 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 DECLARE_WAITQUEUE(wait, current);
294 int err = 0;
295
296 BT_DBG("%s start", hdev->name);
297
Johan Hedberg42c6b122013-03-05 20:37:49 +0200298 hci_req_init(&req, hdev);
299
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300 hdev->req_status = HCI_REQ_PEND;
301
Johan Hedberg42c6b122013-03-05 20:37:49 +0200302 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200303
Chan-yeol Park039fada2014-10-31 14:23:06 +0900304 add_wait_queue(&hdev->req_wait_q, &wait);
305 set_current_state(TASK_INTERRUPTIBLE);
306
Johan Hedbergf60cb302015-04-02 13:41:09 +0300307 err = hci_req_run_skb(&req, hci_req_sync_complete);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200308 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200309 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300310
Chan-yeol Park039fada2014-10-31 14:23:06 +0900311 remove_wait_queue(&hdev->req_wait_q, &wait);
Johan Hedberg22a3cea2014-11-19 13:16:41 +0200312 set_current_state(TASK_RUNNING);
Chan-yeol Park039fada2014-10-31 14:23:06 +0900313
Andre Guedes920c8302013-03-08 11:20:15 -0300314 /* ENODATA means the HCI request command queue is empty.
315 * This can happen when a request with conditionals doesn't
316 * trigger any commands to be sent. This is normal behavior
317 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200318 */
Andre Guedes920c8302013-03-08 11:20:15 -0300319 if (err == -ENODATA)
320 return 0;
321
322 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200323 }
324
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325 schedule_timeout(timeout);
326
327 remove_wait_queue(&hdev->req_wait_q, &wait);
328
329 if (signal_pending(current))
330 return -EINTR;
331
332 switch (hdev->req_status) {
333 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700334 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 break;
336
337 case HCI_REQ_CANCELED:
338 err = -hdev->req_result;
339 break;
340
341 default:
342 err = -ETIMEDOUT;
343 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700344 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345
Johan Hedberga5040ef2011-01-10 13:28:59 +0200346 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347
348 BT_DBG("%s end: err %d", hdev->name, err);
349
350 return err;
351}
352
Johan Hedberg01178cd2013-03-05 20:37:41 +0200353static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200354 void (*req)(struct hci_request *req,
355 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200356 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357{
358 int ret;
359
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200360 if (!test_bit(HCI_UP, &hdev->flags))
361 return -ENETDOWN;
362
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363 /* Serialize all requests */
364 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200365 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366 hci_req_unlock(hdev);
367
368 return ret;
369}
370
Johan Hedberg42c6b122013-03-05 20:37:49 +0200371static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700372{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200373 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374
375 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200376 set_bit(HCI_RESET, &req->hdev->flags);
377 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378}
379
Johan Hedberg42c6b122013-03-05 20:37:49 +0200380static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200382 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200383
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200385 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200387 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200388 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200389
390 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200391 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392}
393
Johan Hedberg0af801b2015-02-17 15:05:21 +0200394static void amp_init1(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200395{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200396 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200397
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200398 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200399 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300400
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700401 /* Read Local Supported Commands */
402 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
403
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300404 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200405 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300406
407 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200408 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700409
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700410 /* Read Flow Control Mode */
411 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
412
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700413 /* Read Location Data */
414 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200415}
416
Johan Hedberg0af801b2015-02-17 15:05:21 +0200417static void amp_init2(struct hci_request *req)
418{
419 /* Read Local Supported Features. Not all AMP controllers
420 * support this so it's placed conditionally in the second
421 * stage init.
422 */
423 if (req->hdev->commands[14] & 0x20)
424 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
425}
426
Johan Hedberg42c6b122013-03-05 20:37:49 +0200427static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200428{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200429 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200430
431 BT_DBG("%s %ld", hdev->name, opt);
432
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300433 /* Reset */
434 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200435 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300436
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200437 switch (hdev->dev_type) {
438 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200439 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200440 break;
441
442 case HCI_AMP:
Johan Hedberg0af801b2015-02-17 15:05:21 +0200443 amp_init1(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200444 break;
445
446 default:
447 BT_ERR("Unknown device type %d", hdev->dev_type);
448 break;
449 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200450}
451
Johan Hedberg42c6b122013-03-05 20:37:49 +0200452static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200453{
Johan Hedberg2177bab2013-03-05 20:37:43 +0200454 __le16 param;
455 __u8 flt_type;
456
457 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200458 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200459
460 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200461 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200462
463 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200464 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200465
466 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200467 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200468
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700469 /* Read Number of Supported IAC */
470 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
471
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700472 /* Read Current IAC LAP */
473 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
474
Johan Hedberg2177bab2013-03-05 20:37:43 +0200475 /* Clear Event Filters */
476 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200477 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200478
479 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700480 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200481 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200482}
483
Johan Hedberg42c6b122013-03-05 20:37:49 +0200484static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200485{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300486 struct hci_dev *hdev = req->hdev;
487
Johan Hedberg2177bab2013-03-05 20:37:43 +0200488 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200489 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200490
491 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200492 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200493
Marcel Holtmann747d3f02014-02-27 20:37:29 -0800494 /* Read LE Supported States */
495 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
496
Johan Hedberg2177bab2013-03-05 20:37:43 +0200497 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200498 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200499
Marcel Holtmann747d3f02014-02-27 20:37:29 -0800500 /* Clear LE White List */
501 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +0300502
503 /* LE-only controllers have LE implicitly enabled */
504 if (!lmp_bredr_capable(hdev))
Marcel Holtmanna1536da2015-03-13 02:11:01 -0700505 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200506}
507
Johan Hedberg42c6b122013-03-05 20:37:49 +0200508static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200509{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200510 struct hci_dev *hdev = req->hdev;
511
Johan Hedberg2177bab2013-03-05 20:37:43 +0200512 /* The second byte is 0xff instead of 0x9f (two reserved bits
513 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
514 * command otherwise.
515 */
516 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
517
518 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
519 * any event mask for pre 1.2 devices.
520 */
521 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
522 return;
523
524 if (lmp_bredr_capable(hdev)) {
525 events[4] |= 0x01; /* Flow Specification Complete */
526 events[4] |= 0x02; /* Inquiry Result with RSSI */
527 events[4] |= 0x04; /* Read Remote Extended Features Complete */
528 events[5] |= 0x08; /* Synchronous Connection Complete */
529 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700530 } else {
531 /* Use a different default for LE-only devices */
532 memset(events, 0, sizeof(events));
533 events[0] |= 0x10; /* Disconnection Complete */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700534 events[1] |= 0x08; /* Read Remote Version Information Complete */
535 events[1] |= 0x20; /* Command Complete */
536 events[1] |= 0x40; /* Command Status */
537 events[1] |= 0x80; /* Hardware Error */
538 events[2] |= 0x04; /* Number of Completed Packets */
539 events[3] |= 0x02; /* Data Buffer Overflow */
Marcel Holtmann0da71f12014-07-12 23:36:16 +0200540
541 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
542 events[0] |= 0x80; /* Encryption Change */
543 events[5] |= 0x80; /* Encryption Key Refresh Complete */
544 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200545 }
546
547 if (lmp_inq_rssi_capable(hdev))
548 events[4] |= 0x02; /* Inquiry Result with RSSI */
549
550 if (lmp_sniffsubr_capable(hdev))
551 events[5] |= 0x20; /* Sniff Subrating */
552
553 if (lmp_pause_enc_capable(hdev))
554 events[5] |= 0x80; /* Encryption Key Refresh Complete */
555
556 if (lmp_ext_inq_capable(hdev))
557 events[5] |= 0x40; /* Extended Inquiry Result */
558
559 if (lmp_no_flush_capable(hdev))
560 events[7] |= 0x01; /* Enhanced Flush Complete */
561
562 if (lmp_lsto_capable(hdev))
563 events[6] |= 0x80; /* Link Supervision Timeout Changed */
564
565 if (lmp_ssp_capable(hdev)) {
566 events[6] |= 0x01; /* IO Capability Request */
567 events[6] |= 0x02; /* IO Capability Response */
568 events[6] |= 0x04; /* User Confirmation Request */
569 events[6] |= 0x08; /* User Passkey Request */
570 events[6] |= 0x10; /* Remote OOB Data Request */
571 events[6] |= 0x20; /* Simple Pairing Complete */
572 events[7] |= 0x04; /* User Passkey Notification */
573 events[7] |= 0x08; /* Keypress Notification */
574 events[7] |= 0x10; /* Remote Host Supported
575 * Features Notification
576 */
577 }
578
579 if (lmp_le_capable(hdev))
580 events[7] |= 0x20; /* LE Meta-Event */
581
Johan Hedberg42c6b122013-03-05 20:37:49 +0200582 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200583}
584
Johan Hedberg42c6b122013-03-05 20:37:49 +0200585static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200586{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200587 struct hci_dev *hdev = req->hdev;
588
Johan Hedberg0af801b2015-02-17 15:05:21 +0200589 if (hdev->dev_type == HCI_AMP)
590 return amp_init2(req);
591
Johan Hedberg2177bab2013-03-05 20:37:43 +0200592 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200593 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +0300594 else
Marcel Holtmanna358dc12015-03-13 02:11:02 -0700595 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200596
597 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200598 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200599
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100600 /* All Bluetooth 1.2 and later controllers should support the
601 * HCI command for reading the local supported commands.
602 *
603 * Unfortunately some controllers indicate Bluetooth 1.2 support,
604 * but do not have support for this command. If that is the case,
605 * the driver can quirk the behavior and skip reading the local
606 * supported commands.
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300607 */
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100608 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
609 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200610 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200611
612 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -0700613 /* When SSP is available, then the host features page
614 * should also be available as well. However some
615 * controllers list the max_page as 0 as long as SSP
616 * has not been enabled. To achieve proper debugging
617 * output, force the minimum max_page to 1 at least.
618 */
619 hdev->max_page = 0x01;
620
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700621 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200622 u8 mode = 0x01;
Marcel Holtmann574ea3c2015-01-22 11:15:20 -0800623
Johan Hedberg42c6b122013-03-05 20:37:49 +0200624 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
625 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200626 } else {
627 struct hci_cp_write_eir cp;
628
629 memset(hdev->eir, 0, sizeof(hdev->eir));
630 memset(&cp, 0, sizeof(cp));
631
Johan Hedberg42c6b122013-03-05 20:37:49 +0200632 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200633 }
634 }
635
Marcel Holtmann043ec9b2015-01-02 23:35:19 -0800636 if (lmp_inq_rssi_capable(hdev) ||
637 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
Marcel Holtmann04422da2015-01-02 23:35:18 -0800638 u8 mode;
639
640 /* If Extended Inquiry Result events are supported, then
641 * they are clearly preferred over Inquiry Result with RSSI
642 * events.
643 */
644 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
645
646 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
647 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200648
649 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200650 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200651
652 if (lmp_ext_feat_capable(hdev)) {
653 struct hci_cp_read_local_ext_features cp;
654
655 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200656 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
657 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200658 }
659
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700660 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200661 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200662 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
663 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200664 }
665}
666
Johan Hedberg42c6b122013-03-05 20:37:49 +0200667static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200668{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200669 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200670 struct hci_cp_write_def_link_policy cp;
671 u16 link_policy = 0;
672
673 if (lmp_rswitch_capable(hdev))
674 link_policy |= HCI_LP_RSWITCH;
675 if (lmp_hold_capable(hdev))
676 link_policy |= HCI_LP_HOLD;
677 if (lmp_sniff_capable(hdev))
678 link_policy |= HCI_LP_SNIFF;
679 if (lmp_park_capable(hdev))
680 link_policy |= HCI_LP_PARK;
681
682 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200683 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200684}
685
Johan Hedberg42c6b122013-03-05 20:37:49 +0200686static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200687{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200688 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200689 struct hci_cp_write_le_host_supported cp;
690
Johan Hedbergc73eee92013-04-19 18:35:21 +0300691 /* LE-only devices do not support explicit enablement */
692 if (!lmp_bredr_capable(hdev))
693 return;
694
Johan Hedberg2177bab2013-03-05 20:37:43 +0200695 memset(&cp, 0, sizeof(cp));
696
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700697 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200698 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +0200699 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200700 }
701
702 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200703 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
704 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200705}
706
/* Build and queue the second page of the HCI event mask, enabling only
 * the events whose corresponding features the controller supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
738
/* Third stage of controller initialization: event mask, stored link
 * keys, page scan parameters, LE event mask and LE settings, and
 * extended feature pages. Each command is gated on the corresponding
 * bit in the controller's supported-commands mask or feature bits.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Read all stored link keys if the command is supported and not
	 * disabled by quirk.
	 */
	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	/* Write Default Link Policy Settings supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	/* Read Page Scan Activity supported */
	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		/* Baseline LE events that every LE controller provides. */
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
841
/* Fourth stage of controller initialization: optional commands that
 * depend on the controller's supported-commands mask and features.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
894
/* Run the full synchronous initialization sequence for a configured
 * controller: init stages 1-4 plus debugfs entry creation. Returns 0 on
 * success or a negative error from any failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		/* The Device Under Test (DUT) mode is special and available
		 * for all controller types. So just create it early on.
		 */
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);

		/* When the driver supports the set_diag callback, then
		 * expose an entry to modify the vendor diagnostic setting.
		 */
		if (hdev->set_diag)
			debugfs_create_file("vendor_diag", 0644, hdev->debugfs,
					    hdev, &vendor_diag_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
963
/* Minimal init sequence for unconfigured controllers: optional reset,
 * local version, and the BD address (only when the driver can set it).
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
981
982static int __hci_unconf_init(struct hci_dev *hdev)
983{
984 int err;
985
Marcel Holtmanncc78b442014-07-06 13:43:20 +0200986 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
987 return 0;
988
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200989 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
990 if (err < 0)
991 return err;
992
993 return 0;
994}
995
Johan Hedberg42c6b122013-03-05 20:37:49 +0200996static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997{
998 __u8 scan = opt;
999
Johan Hedberg42c6b122013-03-05 20:37:49 +02001000 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001
1002 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001003 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004}
1005
Johan Hedberg42c6b122013-03-05 20:37:49 +02001006static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001007{
1008 __u8 auth = opt;
1009
Johan Hedberg42c6b122013-03-05 20:37:49 +02001010 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011
1012 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001013 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014}
1015
Johan Hedberg42c6b122013-03-05 20:37:49 +02001016static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017{
1018 __u8 encrypt = opt;
1019
Johan Hedberg42c6b122013-03-05 20:37:49 +02001020 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001021
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001022 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001023 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001024}
1025
Johan Hedberg42c6b122013-03-05 20:37:49 +02001026static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001027{
1028 __le16 policy = cpu_to_le16(opt);
1029
Johan Hedberg42c6b122013-03-05 20:37:49 +02001030 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001031
1032 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001033 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001034}
1035
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock; take a
	 * reference on the match before dropping the lock so the device
	 * cannot go away under the caller. Caller must hci_dev_put().
	 */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057
1058/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001059
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001060bool hci_discovery_active(struct hci_dev *hdev)
1061{
1062 struct discovery_state *discov = &hdev->discovery;
1063
Andre Guedes6fbe1952012-02-03 17:47:58 -03001064 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001065 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001066 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001067 return true;
1068
Andre Guedes6fbe1952012-02-03 17:47:58 -03001069 default:
1070 return false;
1071 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001072}
1073
/* Transition the discovery state machine and emit the matching mgmt
 * Discovering events. A no-op when the state does not change.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Discovery no longer owns the radio; let background
		 * scanning re-evaluate.
		 */
		hci_update_background_scan(hdev);

		/* Only signal "stopped" if discovery actually ran; a
		 * transition from STARTING means it never began.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1103
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001104void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105{
Johan Hedberg30883512012-01-04 14:16:21 +02001106 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001107 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108
Johan Hedberg561aafb2012-01-04 13:31:59 +02001109 list_for_each_entry_safe(p, n, &cache->all, all) {
1110 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001111 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001113
1114 INIT_LIST_HEAD(&cache->unknown);
1115 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116}
1117
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001118struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1119 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120{
Johan Hedberg30883512012-01-04 14:16:21 +02001121 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122 struct inquiry_entry *e;
1123
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001124 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125
Johan Hedberg561aafb2012-01-04 13:31:59 +02001126 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001128 return e;
1129 }
1130
1131 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132}
1133
Johan Hedberg561aafb2012-01-04 13:31:59 +02001134struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001135 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001136{
Johan Hedberg30883512012-01-04 14:16:21 +02001137 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001138 struct inquiry_entry *e;
1139
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001140 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001141
1142 list_for_each_entry(e, &cache->unknown, list) {
1143 if (!bacmp(&e->data.bdaddr, bdaddr))
1144 return e;
1145 }
1146
1147 return NULL;
1148}
1149
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001150struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001151 bdaddr_t *bdaddr,
1152 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001153{
1154 struct discovery_state *cache = &hdev->discovery;
1155 struct inquiry_entry *e;
1156
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001157 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001158
1159 list_for_each_entry(e, &cache->resolve, list) {
1160 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1161 return e;
1162 if (!bacmp(&e->data.bdaddr, bdaddr))
1163 return e;
1164 }
1165
1166 return NULL;
1167}
1168
/* Re-insert an entry into the resolve list so the list stays ordered
 * by ascending |RSSI| (strongest signal first), with entries already
 * pending name resolution kept ahead of the insertion point.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the entry is re-added at its sorted position. */
	list_del(&ie->list);

	/* Find the first non-pending entry with weaker or equal signal;
	 * ie is inserted just before it.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1187
/* Insert or refresh an inquiry cache entry from a newly received
 * inquiry result. Returns MGMT_DEV_FOUND_* flags describing how the
 * result should be reported (legacy pairing, name confirmation).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Any stored OOB data for this device is now stale. */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* A changed RSSI affects the entry's position in the
		 * resolve list ordering.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to NAME_KNOWN (and drop it from the
	 * unknown/resolve list) once the name has been learned.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1249
1250static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1251{
Johan Hedberg30883512012-01-04 14:16:21 +02001252 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 struct inquiry_info *info = (struct inquiry_info *) buf;
1254 struct inquiry_entry *e;
1255 int copied = 0;
1256
Johan Hedberg561aafb2012-01-04 13:31:59 +02001257 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001259
1260 if (copied >= num)
1261 break;
1262
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 bacpy(&info->bdaddr, &data->bdaddr);
1264 info->pscan_rep_mode = data->pscan_rep_mode;
1265 info->pscan_period_mode = data->pscan_period_mode;
1266 info->pscan_mode = data->pscan_mode;
1267 memcpy(info->dev_class, data->dev_class, 3);
1268 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001269
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001271 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272 }
1273
1274 BT_DBG("cache %p, copied %d", cache, copied);
1275 return copied;
1276}
1277
Johan Hedberg42c6b122013-03-05 20:37:49 +02001278static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279{
1280 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001281 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 struct hci_cp_inquiry cp;
1283
1284 BT_DBG("%s", hdev->name);
1285
1286 if (test_bit(HCI_INQUIRY, &hdev->flags))
1287 return;
1288
1289 /* Start Inquiry */
1290 memcpy(&cp.lap, &ir->lap, 3);
1291 cp.length = ir->length;
1292 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001293 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294}
1295
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry, then copy
 * the cached results back to user space. Returns 0 on success or a
 * negative errno (-EFAULT, -ENODEV, -EBUSY, -EOPNOTSUPP, -EINTR,
 * -ENOMEM, or an error from the inquiry request).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	/* Holds a reference; released via hci_dev_put() at done. */
	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Inquiry only makes sense on BR/EDR capable controllers. */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Flush the cache and run a new inquiry if the cache is stale,
	 * empty, or the caller explicitly requested a flush.
	 */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy the updated request header followed by the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
1390
/* Power on an HCI controller and bring it into operational state.
 *
 * Runs the driver's open()/setup() callbacks and, unless the device is
 * still in setup/config or used as a raw user channel, the full HCI
 * initialization sequence. The whole operation is serialized with the
 * per-device request lock. On any failure after open(), the transport
 * is torn down again before returning.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is going away; refuse to power it up. */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport via the driver callback. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_OPEN);

	/* Allow exactly one outstanding command while in HCI_INIT. */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Full controller init is skipped for unconfigured
		 * devices and raw user-channel access.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Only announce power-on via mgmt for fully configured
		 * BR/EDR controllers under normal (non user-channel) use.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_notify(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		/* Drop all flags except HCI_RAW. */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1540
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001541/* ---- HCI ioctl helpers ---- */
1542
/* HCIDEVUP ioctl entry point: power on the device with the given id.
 *
 * Performs the legacy-interface policy checks (unconfigured devices,
 * bondable default) and makes sure no power on/off work races with the
 * actual open, then delegates to hci_dev_do_open().
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1597
Johan Hedbergd7347f32014-07-04 12:37:23 +03001598/* This function requires the caller holds hdev->lock */
1599static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1600{
1601 struct hci_conn_params *p;
1602
Johan Hedbergf161dd42014-08-15 21:06:54 +03001603 list_for_each_entry(p, &hdev->le_conn_params, list) {
1604 if (p->conn) {
1605 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001606 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001607 p->conn = NULL;
1608 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001609 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001610 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001611
1612 BT_DBG("All LE pending actions cleared");
1613}
1614
/* Power down an HCI controller and tear down all associated state.
 *
 * Cancels pending delayed work, flushes the RX/TX/cmd work items,
 * drops all queued packets, flushes the inquiry cache and connection
 * hash, optionally resets the controller (HCI_QUIRK_RESET_ON_CLOSE),
 * and finally invokes the driver close() callback. Serialized with
 * the per-device request lock. Always returns 0.
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* If the device was already down, only stop the command timer. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	/* Only announce power-off via mgmt when this is not the
	 * automatic timeout-driven shutdown of a BR/EDR device.
	 */
	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1739
1740int hci_dev_close(__u16 dev)
1741{
1742 struct hci_dev *hdev;
1743 int err;
1744
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001745 hdev = hci_dev_get(dev);
1746 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001748
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001749 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001750 err = -EBUSY;
1751 goto done;
1752 }
1753
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001754 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001755 cancel_delayed_work(&hdev->power_off);
1756
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001758
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001759done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 hci_dev_put(hdev);
1761 return err;
1762}
1763
/* Perform an HCI Reset on an already-open controller.
 *
 * Drops all queued packets, flushes the inquiry cache and connection
 * hash, zeroes the flow-control counters, and issues the HCI Reset
 * command synchronously. Serialized with the per-device request lock.
 * Returns the result of the reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command processing with fresh flow-control counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}
1797
Marcel Holtmann5c912492015-01-28 11:53:05 -08001798int hci_dev_reset(__u16 dev)
1799{
1800 struct hci_dev *hdev;
1801 int err;
1802
1803 hdev = hci_dev_get(dev);
1804 if (!hdev)
1805 return -ENODEV;
1806
1807 if (!test_bit(HCI_UP, &hdev->flags)) {
1808 err = -ENETDOWN;
1809 goto done;
1810 }
1811
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001812 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001813 err = -EBUSY;
1814 goto done;
1815 }
1816
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001817 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001818 err = -EOPNOTSUPP;
1819 goto done;
1820 }
1821
1822 err = hci_dev_do_reset(hdev);
1823
1824done:
1825 hci_dev_put(hdev);
1826 return err;
1827}
1828
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829int hci_dev_reset_stat(__u16 dev)
1830{
1831 struct hci_dev *hdev;
1832 int ret = 0;
1833
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001834 hdev = hci_dev_get(dev);
1835 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 return -ENODEV;
1837
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001838 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001839 ret = -EBUSY;
1840 goto done;
1841 }
1842
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001843 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001844 ret = -EOPNOTSUPP;
1845 goto done;
1846 }
1847
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1849
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001850done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 return ret;
1853}
1854
Johan Hedberg123abc02014-07-10 12:09:07 +03001855static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1856{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001857 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001858
1859 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1860
1861 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001862 conn_changed = !hci_dev_test_and_set_flag(hdev,
1863 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001864 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001865 conn_changed = hci_dev_test_and_clear_flag(hdev,
1866 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001867
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001868 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001869 discov_changed = !hci_dev_test_and_set_flag(hdev,
1870 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001871 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001872 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001873 discov_changed = hci_dev_test_and_clear_flag(hdev,
1874 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001875 }
1876
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001877 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001878 return;
1879
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001880 if (conn_changed || discov_changed) {
1881 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001882 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001883
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001884 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001885 mgmt_update_adv_data(hdev);
1886
Johan Hedberg123abc02014-07-10 12:09:07 +03001887 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001888 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001889}
1890
/* Handle the legacy HCI device ioctls (HCISETAUTH, HCISETSCAN, ...).
 *
 * Copies a struct hci_dev_req from userspace and applies the requested
 * setting to the referenced device. Only permitted for BR/EDR-enabled
 * controllers that are neither unconfigured nor bound as a user
 * channel. Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls, dev_opt packs two 16-bit words: the
	 * first holds the packet count, the second the MTU.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1992
1993int hci_get_dev_list(void __user *arg)
1994{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001995 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 struct hci_dev_list_req *dl;
1997 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 int n = 0, size, err;
1999 __u16 dev_num;
2000
2001 if (get_user(dev_num, (__u16 __user *) arg))
2002 return -EFAULT;
2003
2004 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2005 return -EINVAL;
2006
2007 size = sizeof(*dl) + dev_num * sizeof(*dr);
2008
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002009 dl = kzalloc(size, GFP_KERNEL);
2010 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 return -ENOMEM;
2012
2013 dr = dl->dev_req;
2014
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002015 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002016 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002017 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002018
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002019 /* When the auto-off is configured it means the transport
2020 * is running, but in that case still indicate that the
2021 * device is actually down.
2022 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002023 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002024 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002025
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002027 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002028
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 if (++n >= dev_num)
2030 break;
2031 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002032 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033
2034 dl->dev_num = n;
2035 size = sizeof(*dl) + n * sizeof(*dr);
2036
2037 err = copy_to_user(arg, dl, size);
2038 kfree(dl);
2039
2040 return err ? -EFAULT : 0;
2041}
2042
2043int hci_get_dev_info(void __user *arg)
2044{
2045 struct hci_dev *hdev;
2046 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002047 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 int err = 0;
2049
2050 if (copy_from_user(&di, arg, sizeof(di)))
2051 return -EFAULT;
2052
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002053 hdev = hci_dev_get(di.dev_id);
2054 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 return -ENODEV;
2056
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002057 /* When the auto-off is configured it means the transport
2058 * is running, but in that case still indicate that the
2059 * device is actually down.
2060 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002061 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002062 flags = hdev->flags & ~BIT(HCI_UP);
2063 else
2064 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002065
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 strcpy(di.name, hdev->name);
2067 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002068 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002069 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002071 if (lmp_bredr_capable(hdev)) {
2072 di.acl_mtu = hdev->acl_mtu;
2073 di.acl_pkts = hdev->acl_pkts;
2074 di.sco_mtu = hdev->sco_mtu;
2075 di.sco_pkts = hdev->sco_pkts;
2076 } else {
2077 di.acl_mtu = hdev->le_mtu;
2078 di.acl_pkts = hdev->le_pkts;
2079 di.sco_mtu = 0;
2080 di.sco_pkts = 0;
2081 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 di.link_policy = hdev->link_policy;
2083 di.link_mode = hdev->link_mode;
2084
2085 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2086 memcpy(&di.features, &hdev->features, sizeof(di.features));
2087
2088 if (copy_to_user(arg, &di, sizeof(di)))
2089 err = -EFAULT;
2090
2091 hci_dev_put(hdev);
2092
2093 return err;
2094}
2095
2096/* ---- Interface to HCI drivers ---- */
2097
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002098static int hci_rfkill_set_block(void *data, bool blocked)
2099{
2100 struct hci_dev *hdev = data;
2101
2102 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2103
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002104 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002105 return -EBUSY;
2106
Johan Hedberg5e130362013-09-13 08:58:17 +03002107 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002108 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002109 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2110 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002111 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002112 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002113 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002114 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002115
2116 return 0;
2117}
2118
/* rfkill operations: only block-state changes are handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2122
/* Work callback: power the controller on asynchronously and finish the
 * setup/config phase bookkeeping (mgmt events, HCI_RAW, auto-off timer).
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		/* Report the failed power-on to the management interface. */
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		/* Power back down if nobody claims the device in time. */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2183
/* Work callback: power the controller down (auto-off timer or mgmt). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2193
/* Work callback: recover from a controller hardware error by giving the
 * driver a chance to react and then cycling the device closed/open.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	/* Prefer the driver's own error handler when one is provided. */
	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	/* Only attempt the re-open when the close succeeded. */
	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2211
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002212static void hci_discov_off(struct work_struct *work)
2213{
2214 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002215
2216 hdev = container_of(work, struct hci_dev, discov_off.work);
2217
2218 BT_DBG("%s", hdev->name);
2219
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002220 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002221}
2222
Florian Grandel5d900e42015-06-18 03:16:35 +02002223static void hci_adv_timeout_expire(struct work_struct *work)
2224{
2225 struct hci_dev *hdev;
2226
2227 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2228
2229 BT_DBG("%s", hdev->name);
2230
2231 mgmt_adv_timeout_expired(hdev);
2232}
2233
Johan Hedberg35f74982014-02-18 17:14:32 +02002234void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002235{
Johan Hedberg48210022013-01-27 00:31:28 +02002236 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002237
Johan Hedberg48210022013-01-27 00:31:28 +02002238 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2239 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002240 kfree(uuid);
2241 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002242}
2243
Johan Hedberg35f74982014-02-18 17:14:32 +02002244void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002245{
Johan Hedberg0378b592014-11-19 15:22:22 +02002246 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002247
Johan Hedberg0378b592014-11-19 15:22:22 +02002248 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2249 list_del_rcu(&key->list);
2250 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002251 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002252}
2253
Johan Hedberg35f74982014-02-18 17:14:32 +02002254void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002255{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002256 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002257
Johan Hedberg970d0f12014-11-13 14:37:47 +02002258 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2259 list_del_rcu(&k->list);
2260 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002261 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002262}
2263
Johan Hedberg970c4e42014-02-18 10:19:33 +02002264void hci_smp_irks_clear(struct hci_dev *hdev)
2265{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002266 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002267
Johan Hedbergadae20c2014-11-13 14:37:48 +02002268 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2269 list_del_rcu(&k->list);
2270 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002271 }
2272}
2273
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002274struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2275{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002276 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002277
Johan Hedberg0378b592014-11-19 15:22:22 +02002278 rcu_read_lock();
2279 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2280 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2281 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002282 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002283 }
2284 }
2285 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002286
2287 return NULL;
2288}
2289
/* Decide whether a BR/EDR link key should be stored persistently,
 * based on the key type, the previous key type and the bonding
 * requirements negotiated on @conn (if any). The checks are ordered;
 * earlier rules take precedence.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2329
Johan Hedberge804d252014-07-16 11:42:28 +03002330static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002331{
Johan Hedberge804d252014-07-16 11:42:28 +03002332 if (type == SMP_LTK)
2333 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002334
Johan Hedberge804d252014-07-16 11:42:28 +03002335 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002336}
2337
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002338struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2339 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002340{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002341 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002342
Johan Hedberg970d0f12014-11-13 14:37:47 +02002343 rcu_read_lock();
2344 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002345 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2346 continue;
2347
Johan Hedberg923e2412014-12-03 12:43:39 +02002348 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002349 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002350 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002351 }
2352 }
2353 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002354
2355 return NULL;
2356}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002357
/* Resolve a Resolvable Private Address to its stored IRK entry.
 *
 * First pass: cheap comparison against each IRK's cached RPA.
 * Second pass: cryptographic resolution against every IRK; the RPA is
 * cached on the matching entry so later lookups hit the fast path.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path.
			 * NOTE(review): this writes to the entry while only
			 * the RCU read lock is held - presumably all callers
			 * serialize via hdev->lock; confirm.
			 */
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2381
2382struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2383 u8 addr_type)
2384{
2385 struct smp_irk *irk;
2386
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002387 /* Identity Address must be public or static random */
2388 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2389 return NULL;
2390
Johan Hedbergadae20c2014-11-13 14:37:48 +02002391 rcu_read_lock();
2392 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002393 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002394 bacmp(bdaddr, &irk->bdaddr) == 0) {
2395 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002396 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002397 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002398 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002399 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002400
2401 return NULL;
2402}
2403
/* Store (or update in place) a BR/EDR link key for @bdaddr.
 *
 * When @persistent is non-NULL it is set to whether the key should
 * survive a power cycle (see hci_persistent_key()). Returns the stored
 * key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* No previous key: remember the connection's key type (or
		 * 0xff for "none") and allocate a fresh entry.
		 */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* For a changed combination key, keep reporting the previously
	 * stored key type; otherwise store the new type.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2450
Johan Hedbergca9142b2014-02-19 14:57:44 +02002451struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002452 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002453 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002454{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002455 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002456 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002457
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002458 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002459 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002460 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002461 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002462 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002463 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002464 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002465 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002466 }
2467
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002468 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002469 key->bdaddr_type = addr_type;
2470 memcpy(key->val, tk, sizeof(key->val));
2471 key->authenticated = authenticated;
2472 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002473 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002474 key->enc_size = enc_size;
2475 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002476
Johan Hedbergca9142b2014-02-19 14:57:44 +02002477 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002478}
2479
Johan Hedbergca9142b2014-02-19 14:57:44 +02002480struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2481 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002482{
2483 struct smp_irk *irk;
2484
2485 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2486 if (!irk) {
2487 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2488 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002489 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002490
2491 bacpy(&irk->bdaddr, bdaddr);
2492 irk->addr_type = addr_type;
2493
Johan Hedbergadae20c2014-11-13 14:37:48 +02002494 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002495 }
2496
2497 memcpy(irk->val, val, 16);
2498 bacpy(&irk->rpa, rpa);
2499
Johan Hedbergca9142b2014-02-19 14:57:44 +02002500 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002501}
2502
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002503int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2504{
2505 struct link_key *key;
2506
2507 key = hci_find_link_key(hdev, bdaddr);
2508 if (!key)
2509 return -ENOENT;
2510
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002511 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002512
Johan Hedberg0378b592014-11-19 15:22:22 +02002513 list_del_rcu(&key->list);
2514 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002515
2516 return 0;
2517}
2518
Johan Hedberge0b2b272014-02-18 17:14:31 +02002519int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002520{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002521 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002522 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002523
Johan Hedberg970d0f12014-11-13 14:37:47 +02002524 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002525 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002526 continue;
2527
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002528 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002529
Johan Hedberg970d0f12014-11-13 14:37:47 +02002530 list_del_rcu(&k->list);
2531 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002532 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002533 }
2534
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002535 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002536}
2537
Johan Hedberga7ec7332014-02-18 17:14:35 +02002538void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2539{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002540 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002541
Johan Hedbergadae20c2014-11-13 14:37:48 +02002542 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002543 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2544 continue;
2545
2546 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2547
Johan Hedbergadae20c2014-11-13 14:37:48 +02002548 list_del_rcu(&k->list);
2549 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002550 }
2551}
2552
/* Check whether any pairing data (link key or LTK) is stored for
 * @bdaddr with the given mgmt address @type. For LE addresses the IRK
 * table is consulted first so that a resolvable private address is
 * checked against the identity address its keys were stored under.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Map an RPA back to its identity address when an IRK is known. */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2588
Ville Tervo6bd32322011-02-16 16:32:41 +02002589/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002590static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002591{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002592 struct hci_dev *hdev = container_of(work, struct hci_dev,
2593 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002594
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002595 if (hdev->sent_cmd) {
2596 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2597 u16 opcode = __le16_to_cpu(sent->opcode);
2598
2599 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2600 } else {
2601 BT_ERR("%s command tx timeout", hdev->name);
2602 }
2603
Ville Tervo6bd32322011-02-16 16:32:41 +02002604 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002605 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002606}
2607
Szymon Janc2763eda2011-03-22 13:12:22 +01002608struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002609 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002610{
2611 struct oob_data *data;
2612
Johan Hedberg6928a922014-10-26 20:46:09 +01002613 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2614 if (bacmp(bdaddr, &data->bdaddr) != 0)
2615 continue;
2616 if (data->bdaddr_type != bdaddr_type)
2617 continue;
2618 return data;
2619 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002620
2621 return NULL;
2622}
2623
Johan Hedberg6928a922014-10-26 20:46:09 +01002624int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2625 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002626{
2627 struct oob_data *data;
2628
Johan Hedberg6928a922014-10-26 20:46:09 +01002629 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002630 if (!data)
2631 return -ENOENT;
2632
Johan Hedberg6928a922014-10-26 20:46:09 +01002633 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002634
2635 list_del(&data->list);
2636 kfree(data);
2637
2638 return 0;
2639}
2640
Johan Hedberg35f74982014-02-18 17:14:32 +02002641void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002642{
2643 struct oob_data *data, *n;
2644
2645 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2646 list_del(&data->list);
2647 kfree(data);
2648 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002649}
2650
/* Store remote OOB pairing data for @bdaddr, creating the entry when
 * needed.
 *
 * data->present records which key sets are valid:
 *   0x01 P-192 only, 0x02 P-256 only, 0x03 both, 0x00 neither.
 * Missing halves are zeroed out. Returns 0 on success, -ENOMEM on
 * allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* P-192 half: copy when supplied, otherwise clear it. */
	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	/* P-256 half: copy when supplied, otherwise clear it. The
	 * "P-192 only" case is flagged here, after both halves are known.
	 */
	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2696
Florian Grandeld2609b32015-06-18 03:16:34 +02002697/* This function requires the caller holds hdev->lock */
2698struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2699{
2700 struct adv_info *adv_instance;
2701
2702 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2703 if (adv_instance->instance == instance)
2704 return adv_instance;
2705 }
2706
2707 return NULL;
2708}
2709
2710/* This function requires the caller holds hdev->lock */
2711struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2712 struct adv_info *cur_instance;
2713
2714 cur_instance = hci_find_adv_instance(hdev, instance);
2715 if (!cur_instance)
2716 return NULL;
2717
2718 if (cur_instance == list_last_entry(&hdev->adv_instances,
2719 struct adv_info, list))
2720 return list_first_entry(&hdev->adv_instances,
2721 struct adv_info, list);
2722 else
2723 return list_next_entry(cur_instance, list);
2724}
2725
2726/* This function requires the caller holds hdev->lock */
2727int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2728{
2729 struct adv_info *adv_instance;
2730
2731 adv_instance = hci_find_adv_instance(hdev, instance);
2732 if (!adv_instance)
2733 return -ENOENT;
2734
2735 BT_DBG("%s removing %dMR", hdev->name, instance);
2736
Florian Grandel5d900e42015-06-18 03:16:35 +02002737 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2738 cancel_delayed_work(&hdev->adv_instance_expire);
2739 hdev->adv_instance_timeout = 0;
2740 }
2741
Florian Grandeld2609b32015-06-18 03:16:34 +02002742 list_del(&adv_instance->list);
2743 kfree(adv_instance);
2744
2745 hdev->adv_instance_cnt--;
2746
2747 return 0;
2748}
2749
2750/* This function requires the caller holds hdev->lock */
2751void hci_adv_instances_clear(struct hci_dev *hdev)
2752{
2753 struct adv_info *adv_instance, *n;
2754
Florian Grandel5d900e42015-06-18 03:16:35 +02002755 if (hdev->adv_instance_timeout) {
2756 cancel_delayed_work(&hdev->adv_instance_expire);
2757 hdev->adv_instance_timeout = 0;
2758 }
2759
Florian Grandeld2609b32015-06-18 03:16:34 +02002760 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2761 list_del(&adv_instance->list);
2762 kfree(adv_instance);
2763 }
2764
2765 hdev->adv_instance_cnt = 0;
2766}
2767
2768/* This function requires the caller holds hdev->lock */
2769int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2770 u16 adv_data_len, u8 *adv_data,
2771 u16 scan_rsp_len, u8 *scan_rsp_data,
2772 u16 timeout, u16 duration)
2773{
2774 struct adv_info *adv_instance;
2775
2776 adv_instance = hci_find_adv_instance(hdev, instance);
2777 if (adv_instance) {
2778 memset(adv_instance->adv_data, 0,
2779 sizeof(adv_instance->adv_data));
2780 memset(adv_instance->scan_rsp_data, 0,
2781 sizeof(adv_instance->scan_rsp_data));
2782 } else {
2783 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2784 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2785 return -EOVERFLOW;
2786
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002787 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002788 if (!adv_instance)
2789 return -ENOMEM;
2790
Florian Grandelfffd38b2015-06-18 03:16:47 +02002791 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002792 adv_instance->instance = instance;
2793 list_add(&adv_instance->list, &hdev->adv_instances);
2794 hdev->adv_instance_cnt++;
2795 }
2796
2797 adv_instance->flags = flags;
2798 adv_instance->adv_data_len = adv_data_len;
2799 adv_instance->scan_rsp_len = scan_rsp_len;
2800
2801 if (adv_data_len)
2802 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2803
2804 if (scan_rsp_len)
2805 memcpy(adv_instance->scan_rsp_data,
2806 scan_rsp_data, scan_rsp_len);
2807
2808 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002809 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002810
2811 if (duration == 0)
2812 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2813 else
2814 adv_instance->duration = duration;
2815
2816 BT_DBG("%s for %dMR", hdev->name, instance);
2817
2818 return 0;
2819}
2820
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002821struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002822 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002823{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002824 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002825
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002826 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002827 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002828 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002829 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002830
2831 return NULL;
2832}
2833
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002834void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002835{
2836 struct list_head *p, *n;
2837
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002838 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002839 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002840
2841 list_del(p);
2842 kfree(b);
2843 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002844}
2845
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002846int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002847{
2848 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002849
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002850 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002851 return -EBADF;
2852
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002853 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002854 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002855
Johan Hedberg27f70f32014-07-21 10:50:06 +03002856 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002857 if (!entry)
2858 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002859
2860 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002861 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002862
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002863 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002864
2865 return 0;
2866}
2867
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002868int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002869{
2870 struct bdaddr_list *entry;
2871
Johan Hedberg35f74982014-02-18 17:14:32 +02002872 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002873 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002874 return 0;
2875 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002876
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002877 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002878 if (!entry)
2879 return -ENOENT;
2880
2881 list_del(&entry->list);
2882 kfree(entry);
2883
2884 return 0;
2885}
2886
Andre Guedes15819a72014-02-03 13:56:18 -03002887/* This function requires the caller holds hdev->lock */
2888struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2889 bdaddr_t *addr, u8 addr_type)
2890{
2891 struct hci_conn_params *params;
2892
2893 list_for_each_entry(params, &hdev->le_conn_params, list) {
2894 if (bacmp(&params->addr, addr) == 0 &&
2895 params->addr_type == addr_type) {
2896 return params;
2897 }
2898 }
2899
2900 return NULL;
2901}
2902
2903/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002904struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2905 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002906{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002907 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002908
Johan Hedberg501f8822014-07-04 12:37:26 +03002909 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002910 if (bacmp(&param->addr, addr) == 0 &&
2911 param->addr_type == addr_type)
2912 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002913 }
2914
2915 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002916}
2917
2918/* This function requires the caller holds hdev->lock */
Jakub Pawlowskif75113a2015-08-07 20:22:53 +02002919struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2920 bdaddr_t *addr,
2921 u8 addr_type)
2922{
2923 struct hci_conn_params *param;
2924
2925 list_for_each_entry(param, &hdev->pend_le_conns, action) {
2926 if (bacmp(&param->addr, addr) == 0 &&
2927 param->addr_type == addr_type &&
2928 param->explicit_connect)
2929 return param;
2930 }
2931
2932 list_for_each_entry(param, &hdev->pend_le_reports, action) {
2933 if (bacmp(&param->addr, addr) == 0 &&
2934 param->addr_type == addr_type &&
2935 param->explicit_connect)
2936 return param;
2937 }
2938
2939 return NULL;
2940}
2941
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* Reuse an existing parameter set for this address, if any. */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not on any action (pend_le_*) list yet. */
	INIT_LIST_HEAD(&params->action);

	/* Start from the controller-wide default connection parameters;
	 * auto connect stays off until explicitly requested.
	 */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
2974
/* Unlink a connection parameter entry from both the params and the
 * action lists and free it. Drops the connection references held by
 * the entry, if any, before the entry goes away.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2986
/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	/* Removing an entry may change which devices need passive
	 * scanning, so re-evaluate the background scan.
	 */
	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
3002
/* Remove all connection parameter entries whose auto connect mode is
 * disabled. Entries with a pending explicit connect are kept but
 * downgraded to one-shot (HCI_AUTO_CONN_EXPLICIT).
 *
 * This function requires the caller holds hdev->lock
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
3026
/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	/* With no connection parameters left, passive scanning may no
	 * longer be needed.
	 */
	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}
3039
/* Request completion callback for the BR/EDR inquiry started during
 * interleaved discovery. On failure the discovery state machine is
 * moved back to stopped; on success there is nothing to do here,
 * since the inquiry-complete event drives the rest.
 */
static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}
3051
/* Completion handler for the LE scan disable request issued by
 * le_scan_disable_work(). Depending on the discovery type, either
 * stop discovery (LE only) or hand over to BR/EDR inquiry
 * (interleaved discovery).
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	/* Scan is no longer running; clear the recorded start time. */
	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished. If we will resolve
			 * remote device name, do not change discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			/* LE phase is done; start the BR/EDR inquiry with
			 * a fresh inquiry cache.
			 */
			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3114
/* Delayed work that turns off an ongoing LE scan once its duration
 * has elapsed. Completion handling continues in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* A pending scan restart would re-enable scanning behind our
	 * back; cancel it before disabling the scan.
	 */
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3134
/* Completion handler for the scan restart request issued by
 * le_scan_restart_work(). Re-arms the le_scan_disable delayed work
 * with the time remaining from the original scan duration, so that
 * restarting never extends the overall scan window.
 */
static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	/* Only relevant for controllers that filter duplicates strictly
	 * and only while a timed discovery scan is running.
	 */
	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		/* Handle jiffies wrap-around between scan_start and now. */
		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		/* Duration already exceeded; disable as soon as possible. */
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}
3174
/* Delayed work that restarts an active LE scan by disabling and
 * re-enabling it in a single request. Used to flush the controller's
 * duplicate filter; the remaining scan time is re-armed in
 * le_scan_restart_work_complete().
 */
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}
3202
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		/* Use the configured static random address. */
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		/* Use the public BD_ADDR. */
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
3230
/* Alloc HCI device.
 *
 * Allocates a zeroed hci_dev and fills in spec/stack defaults for
 * packet types, LE parameters, timeouts, lists, work items and
 * queues. Returns NULL on allocation failure. The returned device
 * must be registered with hci_register_dev() and released with
 * hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Basic BR/EDR defaults */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning, connection and data length defaults */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device bookkeeping lists */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	/* Immediate and delayed work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3320
/* Free HCI device.
 *
 * Drops the embedded device reference; the actual memory is freed by
 * the device release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3328
/* Register HCI device.
 *
 * Allocates an index, creates the work queues, debugfs and sysfs
 * entries, sets up rfkill and queues the initial power-on work.
 * Returns the assigned device id on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must provide at least these callbacks. */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill setup is best-effort: registration proceeds without it. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3432
/* Unregister HCI device.
 *
 * Tears down everything hci_register_dev() set up: unlinks the
 * device, closes it, removes sysfs/debugfs/rfkill, destroys the work
 * queues, clears all per-device lists and finally releases the index
 * and the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Remember the index; hdev may be gone after hci_dev_put(). */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt for fully set up devices. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drop all per-device state under the device lock. */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3497
/* Suspend HCI device.
 *
 * Only notifies registered listeners of the suspend; always succeeds.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3505
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Counterpart of hci_suspend_dev(): notify listeners only */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3513
Marcel Holtmann75e05692014-11-02 08:15:38 +01003514/* Reset HCI device */
3515int hci_reset_dev(struct hci_dev *hdev)
3516{
3517 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3518 struct sk_buff *skb;
3519
3520 skb = bt_skb_alloc(3, GFP_ATOMIC);
3521 if (!skb)
3522 return -ENOMEM;
3523
3524 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3525 memcpy(skb_put(skb, 3), hw_err, 3);
3526
3527 /* Send Hardware Error to upper stack */
3528 return hci_recv_frame(hdev, skb);
3529}
3530EXPORT_SYMBOL(hci_reset_dev);
3531
Marcel Holtmann76bca882009-11-18 00:40:39 +01003532/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003533int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003534{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003535 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003536 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003537 kfree_skb(skb);
3538 return -ENXIO;
3539 }
3540
Marcel Holtmannfe806dc2015-10-08 03:14:28 +02003541 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3542 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3543 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3544 kfree_skb(skb);
3545 return -EINVAL;
3546 }
3547
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003548 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003549 bt_cb(skb)->incoming = 1;
3550
3551 /* Time stamp */
3552 __net_timestamp(skb);
3553
Marcel Holtmann76bca882009-11-18 00:40:39 +01003554 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003555 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003556
Marcel Holtmann76bca882009-11-18 00:40:39 +01003557 return 0;
3558}
3559EXPORT_SYMBOL(hci_recv_frame);
3560
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003561/* Receive diagnostic message from HCI drivers */
3562int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3563{
3564 /* Time stamp */
3565 __net_timestamp(skb);
3566
3567 /* Mark as diagnostic packet and send to monitor */
3568 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3569 hci_send_to_monitor(hdev, skb);
3570
3571 kfree_skb(skb);
3572 return 0;
3573}
3574EXPORT_SYMBOL(hci_recv_diag);
3575
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576/* ---- Interface to upper protocols ---- */
3577
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Append the callback at the tail of the global list, protected
	 * by the callback list mutex.
	 */
	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3589
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Unlink from the global callback list under the list mutex */
	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3601
Marcel Holtmann51086992013-10-10 14:54:19 -07003602static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003604 int err;
3605
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003606 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003608 /* Time stamp */
3609 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003611 /* Send copy to monitor */
3612 hci_send_to_monitor(hdev, skb);
3613
3614 if (atomic_read(&hdev->promisc)) {
3615 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003616 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 }
3618
3619 /* Get rid of skb owner, prior to sending to the driver. */
3620 skb_orphan(skb);
3621
Marcel Holtmann73d0d3c2015-10-04 23:34:01 +02003622 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3623 kfree_skb(skb);
3624 return;
3625 }
3626
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003627 err = hdev->send(hdev, skb);
3628 if (err < 0) {
3629 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3630 kfree_skb(skb);
3631 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632}
3633
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* Allocate and build the command skb */
	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	/* Queue on the command queue and kick the command work */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658
3659/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003660void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003661{
3662 struct hci_command_hdr *hdr;
3663
3664 if (!hdev->sent_cmd)
3665 return NULL;
3666
3667 hdr = (void *) hdev->sent_cmd->data;
3668
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003669 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 return NULL;
3671
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003672 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003673
3674 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3675}
3676
/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	/* Synchronous commands are only accepted while the device is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* Hold the request lock while the command is in flight */
	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
3695
Linus Torvalds1da177e2005-04-16 15:20:36 -07003696/* Send ACL data */
3697static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3698{
3699 struct hci_acl_hdr *hdr;
3700 int len = skb->len;
3701
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003702 skb_push(skb, HCI_ACL_HDR_SIZE);
3703 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003704 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003705 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3706 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707}
3708
/* Add ACL headers to the skb and every fragment on its frag_list, then
 * queue them all on @queue for transmission.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb length to the linear part; fragments are handled
	 * individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* BR/EDR controllers address data by the connection handle, AMP
	 * controllers by the channel handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Only the first fragment carries ACL_START; the rest are
		 * continuation fragments.
		 */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3770
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	/* Queue the (possibly fragmented) data on the channel ... */
	hci_queue_acl(chan, &chan->data_q, skb, flags);

	/* ... and kick the TX work to push it towards the driver */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781
3782/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003783void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784{
3785 struct hci_dev *hdev = conn->hdev;
3786 struct hci_sco_hdr hdr;
3787
3788 BT_DBG("%s len %d", hdev->name, skb->len);
3789
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003790 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791 hdr.dlen = skb->len;
3792
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003793 skb_push(skb, HCI_SCO_HDR_SIZE);
3794 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003795 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003797 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003798
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003800 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802
3803/* ---- HCI TX task (outgoing data) ---- */
3804
/* HCI Connection scheduler: pick the connection of the given type that
 * has data queued and the fewest packets already sent (fairness), and
 * compute through *quote how many packets it may transmit based on the
 * controller's free buffer count.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the least packets in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free buffer count depends on the link type; LE may share
		 * the ACL buffers when no dedicated LE buffers exist.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the buffers evenly, but always allow at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3865
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003866static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867{
3868 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003869 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870
Ville Tervobae1f5d92011-02-10 22:38:53 -03003871 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003873 rcu_read_lock();
3874
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003876 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003877 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003878 BT_ERR("%s killing stalled connection %pMR",
3879 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003880 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 }
3882 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003883
3884 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885}
3886
/* Channel scheduler: among all connections of the given type, pick the
 * channel whose queued head skb has the highest priority; ties are
 * broken by the owning connection with the fewest packets in flight.
 * The per-round packet budget is returned through *quote.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority resets the fairness state */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities prefer the connection with
			 * the least packets already sent.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free buffer count depends on the link type of the winner */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Divide the buffers across contenders, minimum one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3968
/* Anti-starvation pass: reset the sent counter of channels that got to
 * transmit in the last round, and promote the head skb of channels that
 * did not to (almost) maximum priority so they win the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted recently: clear its counter
			 * and leave its priorities alone.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4018
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet; the ACL header
	 * bytes are excluded, only the payload is counted.
	 */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
4024
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004025static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004027 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028 /* ACL tx timeout must be longer than maximum
4029 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004030 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004031 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004032 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004034}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035
/* Packet-based ACL scheduling: drain per-channel queues while free
 * controller buffers (acl_cnt) remain, honouring skb priorities.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance priorities for the next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4073
/* Block-based ACL scheduling: like hci_sched_acl_pkt() but accounting
 * in controller data blocks (block_cnt) instead of whole packets. AMP
 * controllers schedule AMP_LINK, BR/EDR controllers ACL_LINK.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Give up when the packet does not fit into the
			 * remaining blocks.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* If anything was sent, rebalance priorities for the next round */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4127
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004128static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004129{
4130 BT_DBG("%s", hdev->name);
4131
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004132 /* No ACL link over BR/EDR controller */
4133 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4134 return;
4135
4136 /* No AMP link over AMP controller */
4137 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004138 return;
4139
4140 switch (hdev->flow_ctl_mode) {
4141 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4142 hci_sched_acl_pkt(hdev);
4143 break;
4144
4145 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4146 hci_sched_acl_blk(hdev);
4147 break;
4148 }
4149}
4150
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004152static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153{
4154 struct hci_conn *conn;
4155 struct sk_buff *skb;
4156 int quote;
4157
4158 BT_DBG("%s", hdev->name);
4159
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004160 if (!hci_conn_num(hdev, SCO_LINK))
4161 return;
4162
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4164 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4165 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004166 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167
4168 conn->sent++;
4169 if (conn->sent == ~0)
4170 conn->sent = 0;
4171 }
4172 }
4173}
4174
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004175static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004176{
4177 struct hci_conn *conn;
4178 struct sk_buff *skb;
4179 int quote;
4180
4181 BT_DBG("%s", hdev->name);
4182
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004183 if (!hci_conn_num(hdev, ESCO_LINK))
4184 return;
4185
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004186 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4187 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004188 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4189 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004190 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004191
4192 conn->sent++;
4193 if (conn->sent == ~0)
4194 conn->sent = 0;
4195 }
4196 }
4197}
4198
/* LE scheduling: uses the dedicated LE buffer pool (le_cnt) when the
 * controller reports one (le_pkts), otherwise shares the ACL buffers.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance priorities for the next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4249
/* TX work item: pushes queued ACL/SCO/eSCO/LE data and raw packets
 * towards the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* In user channel mode the schedulers are bypassed; only the raw
	 * queue below is drained.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4270
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004271/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272
4273/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004274static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275{
4276 struct hci_acl_hdr *hdr = (void *) skb->data;
4277 struct hci_conn *conn;
4278 __u16 handle, flags;
4279
4280 skb_pull(skb, HCI_ACL_HDR_SIZE);
4281
4282 handle = __le16_to_cpu(hdr->handle);
4283 flags = hci_flags(handle);
4284 handle = hci_handle(handle);
4285
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004286 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004287 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288
4289 hdev->stat.acl_rx++;
4290
4291 hci_dev_lock(hdev);
4292 conn = hci_conn_hash_lookup_handle(hdev, handle);
4293 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004294
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004296 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004297
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004299 l2cap_recv_acldata(conn, skb, flags);
4300 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004302 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004303 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304 }
4305
4306 kfree_skb(skb);
4307}
4308
4309/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004310static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311{
4312 struct hci_sco_hdr *hdr = (void *) skb->data;
4313 struct hci_conn *conn;
4314 __u16 handle;
4315
4316 skb_pull(skb, HCI_SCO_HDR_SIZE);
4317
4318 handle = __le16_to_cpu(hdr->handle);
4319
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004320 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321
4322 hdev->stat.sco_rx++;
4323
4324 hci_dev_lock(hdev);
4325 conn = hci_conn_hash_lookup_handle(hdev, handle);
4326 hci_dev_unlock(hdev);
4327
4328 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004330 sco_recv_scodata(conn, skb);
4331 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004333 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004334 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335 }
4336
4337 kfree_skb(skb);
4338}
4339
Johan Hedberg9238f362013-03-05 20:37:48 +02004340static bool hci_req_is_complete(struct hci_dev *hdev)
4341{
4342 struct sk_buff *skb;
4343
4344 skb = skb_peek(&hdev->cmd_q);
4345 if (!skb)
4346 return true;
4347
Johan Hedbergdb6e3e82015-03-30 23:21:02 +03004348 return bt_cb(skb)->req.start;
Johan Hedberg9238f362013-03-05 20:37:48 +02004349}
4350
Johan Hedberg42c6b122013-03-05 20:37:49 +02004351static void hci_resend_last(struct hci_dev *hdev)
4352{
4353 struct hci_command_hdr *sent;
4354 struct sk_buff *skb;
4355 u16 opcode;
4356
4357 if (!hdev->sent_cmd)
4358 return;
4359
4360 sent = (void *) hdev->sent_cmd->data;
4361 opcode = __le16_to_cpu(sent->opcode);
4362 if (opcode == HCI_OP_RESET)
4363 return;
4364
4365 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4366 if (!skb)
4367 return;
4368
4369 skb_queue_head(&hdev->cmd_q, skb);
4370 queue_work(hdev->workqueue, &hdev->cmd_work);
4371}
4372
/* Decide whether the command-complete/status event for @opcode finishes the
 * current HCI request and, if so, hand back the request's completion
 * callback via @req_complete or @req_complete_skb (only one is set).
 * On early return neither output pointer is written.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A command marked as a request start opens the next
		 * request; push it back and stop draining.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* NOTE(review): the callbacks of the last drained command
		 * win — presumably only the final command of a request
		 * carries a callback; confirm against the request builder.
		 */
		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4432
/* RX work: drain hdev->rx_q, mirror each packet to the monitor (and to
 * raw sockets when in promiscuous mode), then dispatch it to the proper
 * handler by packet type. Packets are dropped instead of dispatched while
 * the device is in user channel mode, and data packets are dropped during
 * init (HCI_INIT).
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In user channel mode the kernel does not process
		 * packets beyond the monitor/socket copies above.
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: free it here, since the
			 * handlers above consume the skb themselves.
			 */
			kfree_skb(skb);
			break;
		}
	}
}
4487
/* Command work: send the next queued HCI command when the controller has
 * credits (hdev->cmd_cnt). A clone of the command is kept in
 * hdev->sent_cmd so it can be matched on completion and resent if needed
 * (see hci_resend_last/hci_req_cmd_complete).
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* While resetting, the command timeout watchdog is
			 * stopped; otherwise (re)arm it for this command.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}