blob: 556c173ccbc6925b29066aec75bd803cf22d85bf [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
43
Marcel Holtmannb78752c2010-08-08 23:06:53 -040044static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020045static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020046static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
Linus Torvalds1da177e2005-04-16 15:20:36 -070048/* HCI device list */
49LIST_HEAD(hci_dev_list);
50DEFINE_RWLOCK(hci_dev_list_lock);
51
52/* HCI callback list */
53LIST_HEAD(hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +020054DEFINE_MUTEX(hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Sasha Levin3df92b32012-05-27 22:36:56 +020056/* HCI ID Numbering */
57static DEFINE_IDA(hci_index_ida);
58
Marcel Holtmann899de762014-07-11 05:51:58 +020059/* ----- HCI requests ----- */
60
61#define HCI_REQ_DONE 0
62#define HCI_REQ_PEND 1
63#define HCI_REQ_CANCELED 2
64
65#define hci_req_lock(d) mutex_lock(&d->req_lock)
66#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070068/* ---- HCI debugfs entries ---- */
69
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070070static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
71 size_t count, loff_t *ppos)
72{
73 struct hci_dev *hdev = file->private_data;
74 char buf[3];
75
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -070076 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070077 buf[1] = '\n';
78 buf[2] = '\0';
79 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
80}
81
82static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
83 size_t count, loff_t *ppos)
84{
85 struct hci_dev *hdev = file->private_data;
86 struct sk_buff *skb;
87 char buf[32];
88 size_t buf_size = min(count, (sizeof(buf)-1));
89 bool enable;
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070090
91 if (!test_bit(HCI_UP, &hdev->flags))
92 return -ENETDOWN;
93
94 if (copy_from_user(buf, user_buf, buf_size))
95 return -EFAULT;
96
97 buf[buf_size] = '\0';
98 if (strtobool(buf, &enable))
99 return -EINVAL;
100
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -0700101 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700102 return -EALREADY;
103
104 hci_req_lock(hdev);
105 if (enable)
106 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
107 HCI_CMD_TIMEOUT);
108 else
109 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
110 HCI_CMD_TIMEOUT);
111 hci_req_unlock(hdev);
112
113 if (IS_ERR(skb))
114 return PTR_ERR(skb);
115
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700116 kfree_skb(skb);
117
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -0700118 hci_dev_change_flag(hdev, HCI_DUT_MODE);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700119
120 return count;
121}
122
/* File operations for the "dut_mode" debugfs entry. */
static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
129
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200130static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
131 size_t count, loff_t *ppos)
132{
133 struct hci_dev *hdev = file->private_data;
134 char buf[3];
135
136 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
137 buf[1] = '\n';
138 buf[2] = '\0';
139 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
140}
141
142static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
143 size_t count, loff_t *ppos)
144{
145 struct hci_dev *hdev = file->private_data;
146 char buf[32];
147 size_t buf_size = min(count, (sizeof(buf)-1));
148 bool enable;
149 int err;
150
151 if (copy_from_user(buf, user_buf, buf_size))
152 return -EFAULT;
153
154 buf[buf_size] = '\0';
155 if (strtobool(buf, &enable))
156 return -EINVAL;
157
Marcel Holtmann7e995b92015-10-17 16:00:26 +0200158 /* When the diagnostic flags are not persistent and the transport
159 * is not active, then there is no need for the vendor callback.
160 *
161 * Instead just store the desired value. If needed the setting
162 * will be programmed when the controller gets powered on.
163 */
164 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
165 !test_bit(HCI_RUNNING, &hdev->flags))
166 goto done;
167
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200168 hci_req_lock(hdev);
169 err = hdev->set_diag(hdev, enable);
170 hci_req_unlock(hdev);
171
172 if (err < 0)
173 return err;
174
Marcel Holtmann7e995b92015-10-17 16:00:26 +0200175done:
Marcel Holtmann4b4113d2015-10-07 19:52:35 +0200176 if (enable)
177 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
178 else
179 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
180
181 return count;
182}
183
/* File operations for the "vendor_diag" debugfs entry. */
static const struct file_operations vendor_diag_fops = {
	.open = simple_open,
	.read = vendor_diag_read,
	.write = vendor_diag_write,
	.llseek = default_llseek,
};
190
Marcel Holtmannf640ee92015-10-08 12:35:42 +0200191static void hci_debugfs_create_basic(struct hci_dev *hdev)
192{
193 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
194 &dut_mode_fops);
195
196 if (hdev->set_diag)
197 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
198 &vendor_diag_fops);
199}
200
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201/* ---- HCI requests ---- */
202
Johan Hedbergf60cb302015-04-02 13:41:09 +0300203static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
204 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200206 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207
208 if (hdev->req_status == HCI_REQ_PEND) {
209 hdev->req_result = result;
210 hdev->req_status = HCI_REQ_DONE;
Johan Hedbergf60cb302015-04-02 13:41:09 +0300211 if (skb)
212 hdev->req_skb = skb_get(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700213 wake_up_interruptible(&hdev->req_wait_q);
214 }
215}
216
217static void hci_req_cancel(struct hci_dev *hdev, int err)
218{
219 BT_DBG("%s err 0x%2.2x", hdev->name, err);
220
221 if (hdev->req_status == HCI_REQ_PEND) {
222 hdev->req_result = err;
223 hdev->req_status = HCI_REQ_CANCELED;
224 wake_up_interruptible(&hdev->req_wait_q);
225 }
226}
227
/* Send a single HCI command and wait synchronously for its completion.
 *
 * @event selects the HCI event that terminates the wait (0 means the
 * normal Command Complete/Status flow). On success the completion skb
 * is returned with a reference owned by the caller; on failure an
 * ERR_PTR is returned. Callers in this file serialize around this with
 * hci_req_lock() — presumably that is required; confirm before adding
 * new call sites.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue before submitting the request so a
	 * completion that arrives immediately cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on the signal path req_status/req_skb are not
	 * reset before returning — verify the serialization via
	 * req_lock makes this safe for the next request.
	 */
	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* schedule_timeout() expired without a completion */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Request ran but produced no completion skb to hand back */
	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
292
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case
 * of waiting for the normal Command Complete/Status flow (event 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
299
/* Execute request and wait for completion.
 *
 * @func builds the request (adds HCI commands) against a fresh
 * hci_request; the built request is then submitted and this thread
 * sleeps on req_wait_q until hci_req_sync_complete()/hci_req_cancel()
 * fires or @timeout expires. Callers in this file hold req_lock around
 * this (see hci_req_sync()). Returns 0 on success or a negative errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its commands into the request */
	func(&req, opt);

	/* Register on the wait queue before submitting so an immediate
	 * completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on the signal path req_status is not reset before
	 * returning — verify req_lock serialization makes this safe.
	 */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* schedule_timeout() expired without a completion */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
366
Johan Hedberg01178cd2013-03-05 20:37:41 +0200367static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200368 void (*req)(struct hci_request *req,
369 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200370 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371{
372 int ret;
373
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200374 if (!test_bit(HCI_UP, &hdev->flags))
375 return -ENETDOWN;
376
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377 /* Serialize all requests */
378 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200379 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380 hci_req_unlock(hdev);
381
382 return ret;
383}
384
Johan Hedberg42c6b122013-03-05 20:37:49 +0200385static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200387 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388
389 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200390 set_bit(HCI_RESET, &req->hdev->flags);
391 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392}
393
/* Stage-one init commands for BR/EDR controllers: packet-based flow
 * control plus the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
407
/* Stage-one init commands for AMP controllers: block-based flow
 * control plus the AMP-specific capability reads.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
430
/* Stage-two init commands for AMP controllers. */
static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}
440
Johan Hedberg42c6b122013-03-05 20:37:49 +0200441static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200442{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200443 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200444
445 BT_DBG("%s %ld", hdev->name, opt);
446
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300447 /* Reset */
448 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200449 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300450
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200451 switch (hdev->dev_type) {
452 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200453 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200454 break;
455
456 case HCI_AMP:
Johan Hedberg0af801b2015-02-17 15:05:21 +0200457 amp_init1(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200458 break;
459
460 default:
461 BT_ERR("Unknown device type %d", hdev->dev_type);
462 break;
463 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200464}
465
/* Stage-two setup commands common to all BR/EDR capable controllers. */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
497
/* Stage-two setup commands for LE capable controllers. */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
515
/* Build the HCI Set Event Mask command, enabling only the events the
 * controller's feature set can actually generate. LE-only controllers
 * start from an empty mask; BR/EDR controllers start from the classic
 * default and add feature-dependent events on top.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
597
/* Stage-two controller init: transport-specific setup (BR/EDR, LE or
 * AMP) plus feature-conditional configuration commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* AMP controllers have their own, much shorter second stage */
	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stored EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		/* Read the first extended features page */
		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
679
Johan Hedberg42c6b122013-03-05 20:37:49 +0200680static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200681{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200682 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200683 struct hci_cp_write_def_link_policy cp;
684 u16 link_policy = 0;
685
686 if (lmp_rswitch_capable(hdev))
687 link_policy |= HCI_LP_RSWITCH;
688 if (lmp_hold_capable(hdev))
689 link_policy |= HCI_LP_HOLD;
690 if (lmp_sniff_capable(hdev))
691 link_policy |= HCI_LP_SNIFF;
692 if (lmp_park_capable(hdev))
693 link_policy |= HCI_LP_PARK;
694
695 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200696 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200697}
698
Johan Hedberg42c6b122013-03-05 20:37:49 +0200699static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200700{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200701 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200702 struct hci_cp_write_le_host_supported cp;
703
Johan Hedbergc73eee92013-04-19 18:35:21 +0300704 /* LE-only devices do not support explicit enablement */
705 if (!lmp_bredr_capable(hdev))
706 return;
707
Johan Hedberg2177bab2013-03-05 20:37:43 +0200708 memset(&cp, 0, sizeof(cp));
709
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700710 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200711 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +0200712 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200713 }
714
715 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200716 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
717 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200718}
719
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300720static void hci_set_event_mask_page_2(struct hci_request *req)
721{
722 struct hci_dev *hdev = req->hdev;
723 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
724
725 /* If Connectionless Slave Broadcast master role is supported
726 * enable all necessary events for it.
727 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800728 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300729 events[1] |= 0x40; /* Triggered Clock Capture */
730 events[1] |= 0x80; /* Synchronization Train Complete */
731 events[2] |= 0x10; /* Slave Page Response Timeout */
732 events[2] |= 0x20; /* CSB Channel Map Change */
733 }
734
735 /* If Connectionless Slave Broadcast slave role is supported
736 * enable all necessary events for it.
737 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800738 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300739 events[2] |= 0x01; /* Synchronization Train Received */
740 events[2] |= 0x02; /* CSB Receive */
741 events[2] |= 0x04; /* CSB Timeout */
742 events[2] |= 0x08; /* Truncated Page Complete */
743 }
744
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800745 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +0200746 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800747 events[2] |= 0x80;
748
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300749 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
750}
751
/* Third stage of controller initialization.
 *
 * Relies on hdev->commands[] and the feature bits having been
 * populated by the earlier init stages.  Queues: event mask setup,
 * stored link key read, default link policy, page scan parameter
 * reads and, for LE capable controllers, the LE event mask plus
 * LE state reads.  Finishes by reading any extended local feature
 * pages beyond page 1.  Commands are queued in this exact order.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Read Stored Link Key is only issued when supported and not
	 * marked broken by a driver quirk.
	 */
	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
892
/* Fourth and final stage of controller initialization: commands whose
 * availability depends on supported-command bits read in earlier
 * stages (stored link key cleanup, event mask page 2, codec list,
 * MWS transport config, sync train params and Secure Connections).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
945
/* Run the full four-stage synchronous initialization of a configured
 * controller.  AMP controllers stop after stage two; BR/EDR and LE
 * controllers run all four stages.  On first-time setup/config the
 * debugfs entries are created as well.  Returns 0 or the first
 * negative error from __hci_req_sync().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
1002
/* Minimal init request used for unconfigured controllers: an optional
 * reset (must stay first in the sequence), Read Local Version and,
 * only when the driver can program a public address via set_bdaddr,
 * Read BD Address.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless the quirk says the controller must not be reset
	 * on close/open cycles.
	 */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1020
/* Run the minimal hci_init0_req sequence for a controller that is not
 * (yet) fully configured.  Raw devices skip initialization entirely.
 * Returns 0 on success or a negative error from __hci_req_sync().
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Basic debugfs entries are only created on first-time setup */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
1037
Johan Hedberg42c6b122013-03-05 20:37:49 +02001038static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001039{
1040 __u8 scan = opt;
1041
Johan Hedberg42c6b122013-03-05 20:37:49 +02001042 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043
1044 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001045 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046}
1047
Johan Hedberg42c6b122013-03-05 20:37:49 +02001048static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001049{
1050 __u8 auth = opt;
1051
Johan Hedberg42c6b122013-03-05 20:37:49 +02001052 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001053
1054 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001055 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001056}
1057
Johan Hedberg42c6b122013-03-05 20:37:49 +02001058static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059{
1060 __u8 encrypt = opt;
1061
Johan Hedberg42c6b122013-03-05 20:37:49 +02001062 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001064 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001065 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001066}
1067
Johan Hedberg42c6b122013-03-05 20:37:49 +02001068static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001069{
1070 __le16 policy = cpu_to_le16(opt);
1071
Johan Hedberg42c6b122013-03-05 20:37:49 +02001072 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001073
1074 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001075 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001076}
1077
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001078/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079 * Device is held on return. */
1080struct hci_dev *hci_dev_get(int index)
1081{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001082 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083
1084 BT_DBG("%d", index);
1085
1086 if (index < 0)
1087 return NULL;
1088
1089 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001090 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091 if (d->id == index) {
1092 hdev = hci_dev_hold(d);
1093 break;
1094 }
1095 }
1096 read_unlock(&hci_dev_list_lock);
1097 return hdev;
1098}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099
1100/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001101
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001102bool hci_discovery_active(struct hci_dev *hdev)
1103{
1104 struct discovery_state *discov = &hdev->discovery;
1105
Andre Guedes6fbe1952012-02-03 17:47:58 -03001106 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001107 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001108 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001109 return true;
1110
Andre Guedes6fbe1952012-02-03 17:47:58 -03001111 default:
1112 return false;
1113 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001114}
1115
Johan Hedbergff9ef572012-01-04 14:23:45 +02001116void hci_discovery_set_state(struct hci_dev *hdev, int state)
1117{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001118 int old_state = hdev->discovery.state;
1119
Johan Hedbergff9ef572012-01-04 14:23:45 +02001120 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1121
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001122 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001123 return;
1124
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001125 hdev->discovery.state = state;
1126
Johan Hedbergff9ef572012-01-04 14:23:45 +02001127 switch (state) {
1128 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001129 hci_update_background_scan(hdev);
1130
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001131 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001132 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001133 break;
1134 case DISCOVERY_STARTING:
1135 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001136 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001137 mgmt_discovering(hdev, 1);
1138 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001139 case DISCOVERY_RESOLVING:
1140 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001141 case DISCOVERY_STOPPING:
1142 break;
1143 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001144}
1145
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001146void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147{
Johan Hedberg30883512012-01-04 14:16:21 +02001148 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001149 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150
Johan Hedberg561aafb2012-01-04 13:31:59 +02001151 list_for_each_entry_safe(p, n, &cache->all, all) {
1152 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001153 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001155
1156 INIT_LIST_HEAD(&cache->unknown);
1157 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158}
1159
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001160struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1161 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162{
Johan Hedberg30883512012-01-04 14:16:21 +02001163 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 struct inquiry_entry *e;
1165
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001166 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167
Johan Hedberg561aafb2012-01-04 13:31:59 +02001168 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001170 return e;
1171 }
1172
1173 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174}
1175
Johan Hedberg561aafb2012-01-04 13:31:59 +02001176struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001177 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001178{
Johan Hedberg30883512012-01-04 14:16:21 +02001179 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001180 struct inquiry_entry *e;
1181
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001182 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001183
1184 list_for_each_entry(e, &cache->unknown, list) {
1185 if (!bacmp(&e->data.bdaddr, bdaddr))
1186 return e;
1187 }
1188
1189 return NULL;
1190}
1191
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001192struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001193 bdaddr_t *bdaddr,
1194 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001195{
1196 struct discovery_state *cache = &hdev->discovery;
1197 struct inquiry_entry *e;
1198
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001199 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001200
1201 list_for_each_entry(e, &cache->resolve, list) {
1202 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1203 return e;
1204 if (!bacmp(&e->data.bdaddr, bdaddr))
1205 return e;
1206 }
1207
1208 return NULL;
1209}
1210
Johan Hedberga3d4e202012-01-09 00:53:02 +02001211void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001212 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001213{
1214 struct discovery_state *cache = &hdev->discovery;
1215 struct list_head *pos = &cache->resolve;
1216 struct inquiry_entry *p;
1217
1218 list_del(&ie->list);
1219
1220 list_for_each_entry(p, &cache->resolve, list) {
1221 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001222 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001223 break;
1224 pos = &p->list;
1225 }
1226
1227 list_add(&ie->list, pos);
1228}
1229
/* Insert or refresh the inquiry cache entry for an inquiry result.
 *
 * @data:       parsed inquiry result for the remote device
 * @name_known: true when the remote name came with the result
 *
 * Returns MGMT_DEV_FOUND_* flags describing the entry: legacy pairing
 * is flagged when SSP is absent, and name confirmation is requested
 * when the name is still unknown (or the entry could not be
 * allocated).  Also drops any stale remote OOB data for the address.
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* A changed RSSI on a name-needed entry re-sorts the
		 * resolve list, which is kept ordered by signal strength.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry to NAME_KNOWN unless name resolution for it
	 * is already pending.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1291
1292static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1293{
Johan Hedberg30883512012-01-04 14:16:21 +02001294 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 struct inquiry_info *info = (struct inquiry_info *) buf;
1296 struct inquiry_entry *e;
1297 int copied = 0;
1298
Johan Hedberg561aafb2012-01-04 13:31:59 +02001299 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001301
1302 if (copied >= num)
1303 break;
1304
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 bacpy(&info->bdaddr, &data->bdaddr);
1306 info->pscan_rep_mode = data->pscan_rep_mode;
1307 info->pscan_period_mode = data->pscan_period_mode;
1308 info->pscan_mode = data->pscan_mode;
1309 memcpy(info->dev_class, data->dev_class, 3);
1310 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001311
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001313 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 }
1315
1316 BT_DBG("cache %p, copied %d", cache, copied);
1317 return copied;
1318}
1319
Johan Hedberg42c6b122013-03-05 20:37:49 +02001320static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321{
1322 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001323 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 struct hci_cp_inquiry cp;
1325
1326 BT_DBG("%s", hdev->name);
1327
1328 if (test_bit(HCI_INQUIRY, &hdev->flags))
1329 return;
1330
1331 /* Start Inquiry */
1332 memcpy(&cp.lap, &ir->lap, 3);
1333 cp.length = ir->length;
1334 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001335 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336}
1337
1338int hci_inquiry(void __user *arg)
1339{
1340 __u8 __user *ptr = arg;
1341 struct hci_inquiry_req ir;
1342 struct hci_dev *hdev;
1343 int err = 0, do_inquiry = 0, max_rsp;
1344 long timeo;
1345 __u8 *buf;
1346
1347 if (copy_from_user(&ir, ptr, sizeof(ir)))
1348 return -EFAULT;
1349
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001350 hdev = hci_dev_get(ir.dev_id);
1351 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 return -ENODEV;
1353
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001354 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001355 err = -EBUSY;
1356 goto done;
1357 }
1358
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001359 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001360 err = -EOPNOTSUPP;
1361 goto done;
1362 }
1363
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001364 if (hdev->dev_type != HCI_BREDR) {
1365 err = -EOPNOTSUPP;
1366 goto done;
1367 }
1368
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001369 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001370 err = -EOPNOTSUPP;
1371 goto done;
1372 }
1373
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001374 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001375 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001376 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001377 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 do_inquiry = 1;
1379 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001380 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
Marcel Holtmann04837f62006-07-03 10:02:33 +02001382 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001383
1384 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001385 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1386 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001387 if (err < 0)
1388 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001389
1390 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1391 * cleared). If it is interrupted by a signal, return -EINTR.
1392 */
NeilBrown74316202014-07-07 15:16:04 +10001393 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001394 TASK_INTERRUPTIBLE))
1395 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001396 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001398 /* for unlimited number of responses we will use buffer with
1399 * 255 entries
1400 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1402
1403 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1404 * copy it to the user space.
1405 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001406 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001407 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 err = -ENOMEM;
1409 goto done;
1410 }
1411
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001412 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001414 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415
1416 BT_DBG("num_rsp %d", ir.num_rsp);
1417
1418 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1419 ptr += sizeof(ir);
1420 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001421 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001423 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 err = -EFAULT;
1425
1426 kfree(buf);
1427
1428done:
1429 hci_dev_put(hdev);
1430 return err;
1431}
1432
/* Power on an HCI controller and run its full init sequence.
 *
 * Called with a reference on @hdev held. Takes the request lock for the
 * whole procedure. Returns 0 on success or a negative errno; on init
 * failure the transport is closed again and only HCI_RAW survives in
 * hdev->flags.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Bail out if unregistration is already in progress */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (driver callback) before any HCI traffic */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	/* One free command credit until the controller reports its limits */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Full HCI init only for configured, non-user-channel use;
		 * user channel gets the bare transport.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		/* Notify mgmt only for fully set-up BR/EDR controllers */
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup: stop workers, drop queued traffic
		 * and close the transport again.
		 */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1595
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001596/* ---- HCI ioctl helpers ---- */
1597
/* ioctl entry point to power on device @dev.
 *
 * Resolves the device id, performs policy checks that only apply to the
 * legacy ioctl path, then hands off to hci_dev_do_open(). Returns 0 or
 * a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1652
Johan Hedbergd7347f32014-07-04 12:37:23 +03001653/* This function requires the caller holds hdev->lock */
1654static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1655{
1656 struct hci_conn_params *p;
1657
Johan Hedbergf161dd42014-08-15 21:06:54 +03001658 list_for_each_entry(p, &hdev->le_conn_params, list) {
1659 if (p->conn) {
1660 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001661 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001662 p->conn = NULL;
1663 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001664 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001665 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001666
1667 BT_DBG("All LE pending actions cleared");
1668}
1669
/* Power off an HCI controller and tear down all runtime state.
 *
 * Called with a reference on @hdev held; drops that reference before
 * returning. Safe to call on an already-down device (returns 0 early).
 * The shutdown order below is deliberate: delayed work is cancelled and
 * the workqueue drained before the *_flush() calls to avoid lockdep
 * complaints and use-after-free of queued work items.
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	/* Execute vendor specific shutdown routine, but only while the
	 * device is still registered, up, and not owned by a user channel.
	 */
	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Nothing to do if the device was not up */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	/* Only notify mgmt when this is a real power-off, not auto-off */
	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags; only HCI_RAW survives a close */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1794
1795int hci_dev_close(__u16 dev)
1796{
1797 struct hci_dev *hdev;
1798 int err;
1799
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001800 hdev = hci_dev_get(dev);
1801 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001803
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001804 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001805 err = -EBUSY;
1806 goto done;
1807 }
1808
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001809 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001810 cancel_delayed_work(&hdev->power_off);
1811
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001813
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001814done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 hci_dev_put(hdev);
1816 return err;
1817}
1818
/* Issue an HCI Reset to @hdev and flush all per-device runtime state.
 *
 * Caller must already have validated that the device is up and usable;
 * this helper only performs the reset itself under the request lock.
 * Returns the result of the synchronous reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the single free command credit and clear packet counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}
1852
Marcel Holtmann5c912492015-01-28 11:53:05 -08001853int hci_dev_reset(__u16 dev)
1854{
1855 struct hci_dev *hdev;
1856 int err;
1857
1858 hdev = hci_dev_get(dev);
1859 if (!hdev)
1860 return -ENODEV;
1861
1862 if (!test_bit(HCI_UP, &hdev->flags)) {
1863 err = -ENETDOWN;
1864 goto done;
1865 }
1866
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001867 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001868 err = -EBUSY;
1869 goto done;
1870 }
1871
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001872 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001873 err = -EOPNOTSUPP;
1874 goto done;
1875 }
1876
1877 err = hci_dev_do_reset(hdev);
1878
1879done:
1880 hci_dev_put(hdev);
1881 return err;
1882}
1883
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884int hci_dev_reset_stat(__u16 dev)
1885{
1886 struct hci_dev *hdev;
1887 int ret = 0;
1888
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001889 hdev = hci_dev_get(dev);
1890 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 return -ENODEV;
1892
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001893 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001894 ret = -EBUSY;
1895 goto done;
1896 }
1897
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001898 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001899 ret = -EOPNOTSUPP;
1900 goto done;
1901 }
1902
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1904
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001905done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 return ret;
1908}
1909
/* Sync the CONNECTABLE/DISCOVERABLE flags with a scan-enable value that
 * was set outside of mgmt (legacy HCISETSCAN ioctl).
 *
 * @scan is the HCI Write Scan Enable bitmask (SCAN_PAGE/SCAN_INQUIRY).
 * The flags are always updated; mgmt is only notified afterwards when
 * the management interface is in use and something actually changed.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* test-and-set/clear so we learn whether the flag really flipped */
	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable cannot survive without inquiry scan */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
1945
/* Handle the legacy HCISET* ioctls (auth, encrypt, scan, link policy,
 * link mode, packet type, ACL/SCO MTU).
 *
 * @cmd is the ioctl number, @arg points to a struct hci_dev_req in user
 * space. Returns 0 or a negative errno. Only configured BR/EDR devices
 * that are not claimed by a user channel may be controlled this way.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs: low 16 bits = packet count, high 16 = MTU */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2047
/* Copy the list of registered HCI devices to user space (HCIGETDEVLIST).
 *
 * @arg points to a struct hci_dev_list_req whose dev_num field caps the
 * number of entries to return; on success the filled structure (actual
 * count in dev_num) is copied back. Returns 0, -EFAULT, -EINVAL or
 * -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2097
/* Fill in a struct hci_dev_info for one device (HCIGETDEVINFO ioctl).
 *
 * @arg points to a struct hci_dev_info in user space whose dev_id
 * selects the device; the structure is copied back filled in.
 * Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	/* NOTE(review): plain strcpy assumes hdev->name always fits in
	 * di.name and is NUL-terminated — presumably both are the same
	 * fixed-size field; verify against the struct definitions.
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the next two bits */
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffer info in the ACL slots */
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2150
2151/* ---- Interface to HCI drivers ---- */
2152
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002153static int hci_rfkill_set_block(void *data, bool blocked)
2154{
2155 struct hci_dev *hdev = data;
2156
2157 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2158
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002159 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002160 return -EBUSY;
2161
Johan Hedberg5e130362013-09-13 08:58:17 +03002162 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002163 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002164 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2165 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002166 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002167 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002168 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002169 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002170
2171 return 0;
2172}
2173
/* Callbacks registered with the rfkill core for each controller */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2177
/* Work callback that powers a controller on.
 *
 * Opens the device, re-checks error conditions that were deliberately
 * ignored during setup, optionally arms the auto power-off timer and
 * finally announces the controller to the management interface once
 * its setup/config phase completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		/* Report the power-on failure to the management
		 * interface under the device lock.
		 */
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		/* Schedule the automatic power-off in case nobody
		 * claims the device within the timeout.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2238
2239static void hci_power_off(struct work_struct *work)
2240{
Johan Hedberg32435532011-11-07 22:16:04 +02002241 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002242 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002243
2244 BT_DBG("%s", hdev->name);
2245
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002246 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002247}
2248
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002249static void hci_error_reset(struct work_struct *work)
2250{
2251 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2252
2253 BT_DBG("%s", hdev->name);
2254
2255 if (hdev->hw_error)
2256 hdev->hw_error(hdev, hdev->hw_error_code);
2257 else
2258 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2259 hdev->hw_error_code);
2260
2261 if (hci_dev_do_close(hdev))
2262 return;
2263
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002264 hci_dev_do_open(hdev);
2265}
2266
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002267static void hci_discov_off(struct work_struct *work)
2268{
2269 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002270
2271 hdev = container_of(work, struct hci_dev, discov_off.work);
2272
2273 BT_DBG("%s", hdev->name);
2274
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002275 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002276}
2277
Florian Grandel5d900e42015-06-18 03:16:35 +02002278static void hci_adv_timeout_expire(struct work_struct *work)
2279{
2280 struct hci_dev *hdev;
2281
2282 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2283
2284 BT_DBG("%s", hdev->name);
2285
2286 mgmt_adv_timeout_expired(hdev);
2287}
2288
Johan Hedberg35f74982014-02-18 17:14:32 +02002289void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002290{
Johan Hedberg48210022013-01-27 00:31:28 +02002291 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002292
Johan Hedberg48210022013-01-27 00:31:28 +02002293 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2294 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002295 kfree(uuid);
2296 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002297}
2298
/* Free all stored BR/EDR link keys of the given controller.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu(),
 * so concurrent RCU readers of the list stay safe until the grace
 * period has elapsed.
 * NOTE(review): presumably serialized against writers by hdev->lock -
 * confirm at the call sites.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
2308
/* Free all stored LE Long Term Keys of the given controller.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu(),
 * keeping concurrent RCU readers safe until the grace period ends.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2318
/* Free all stored Identity Resolving Keys of the given controller.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu(),
 * keeping concurrent RCU readers safe until the grace period ends.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2328
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002329struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2330{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002331 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002332
Johan Hedberg0378b592014-11-19 15:22:22 +02002333 rcu_read_lock();
2334 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2335 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2336 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002337 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002338 }
2339 }
2340 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002341
2342 return NULL;
2343}
2344
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302345static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002346 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002347{
2348 /* Legacy key */
2349 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302350 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002351
2352 /* Debug keys are insecure so don't store them persistently */
2353 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302354 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002355
2356 /* Changed combination key and there's no previous one */
2357 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302358 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002359
2360 /* Security mode 3 case */
2361 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302362 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002363
Johan Hedberge3befab2014-06-01 16:33:39 +03002364 /* BR/EDR key derived using SC from an LE link */
2365 if (conn->type == LE_LINK)
2366 return true;
2367
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002368 /* Neither local nor remote side had no-bonding as requirement */
2369 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302370 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002371
2372 /* Local side had dedicated bonding as requirement */
2373 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302374 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002375
2376 /* Remote side had dedicated bonding as requirement */
2377 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302378 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002379
2380 /* If none of the above criteria match, then don't store the key
2381 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302382 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002383}
2384
Johan Hedberge804d252014-07-16 11:42:28 +03002385static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002386{
Johan Hedberge804d252014-07-16 11:42:28 +03002387 if (type == SMP_LTK)
2388 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002389
Johan Hedberge804d252014-07-16 11:42:28 +03002390 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002391}
2392
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002393struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2394 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002395{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002396 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002397
Johan Hedberg970d0f12014-11-13 14:37:47 +02002398 rcu_read_lock();
2399 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002400 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2401 continue;
2402
Johan Hedberg923e2412014-12-03 12:43:39 +02002403 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002404 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002405 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002406 }
2407 }
2408 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002409
2410 return NULL;
2411}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002412
/* Look up the Identity Resolving Key matching a Resolvable Private
 * Address.
 *
 * First try a cheap comparison against the RPA cached in each entry;
 * only when that misses fall back to resolving the address with every
 * stored key via smp_irk_matches(). On a hit in the slow path the RPA
 * is cached in the entry so the next lookup takes the fast path.
 *
 * Returns the matching IRK or NULL.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2436
2437struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2438 u8 addr_type)
2439{
2440 struct smp_irk *irk;
2441
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002442 /* Identity Address must be public or static random */
2443 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2444 return NULL;
2445
Johan Hedbergadae20c2014-11-13 14:37:48 +02002446 rcu_read_lock();
2447 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002448 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002449 bacmp(bdaddr, &irk->bdaddr) == 0) {
2450 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002451 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002452 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002453 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002454 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002455
2456 return NULL;
2457}
2458
/* Store (or update) a BR/EDR link key for a remote address.
 *
 * If a key for @bdaddr already exists it is updated in place,
 * otherwise a new entry is allocated and added to hdev->link_keys.
 * When @persistent is non-NULL it is set to whether userspace should
 * store the key permanently (see hci_persistent_key()).
 *
 * Returns the stored key or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps reporting the previous type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2505
/* Store (or update) an LE Long Term Key.
 *
 * If a key with the same identity address, address type and role
 * already exists it is updated in place, otherwise a new entry is
 * allocated and added to hdev->long_term_keys.
 *
 * Returns the stored key or NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
2534
Johan Hedbergca9142b2014-02-19 14:57:44 +02002535struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2536 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002537{
2538 struct smp_irk *irk;
2539
2540 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2541 if (!irk) {
2542 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2543 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002544 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002545
2546 bacpy(&irk->bdaddr, bdaddr);
2547 irk->addr_type = addr_type;
2548
Johan Hedbergadae20c2014-11-13 14:37:48 +02002549 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002550 }
2551
2552 memcpy(irk->val, val, 16);
2553 bacpy(&irk->rpa, rpa);
2554
Johan Hedbergca9142b2014-02-19 14:57:44 +02002555 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002556}
2557
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002558int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2559{
2560 struct link_key *key;
2561
2562 key = hci_find_link_key(hdev, bdaddr);
2563 if (!key)
2564 return -ENOENT;
2565
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002566 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002567
Johan Hedberg0378b592014-11-19 15:22:22 +02002568 list_del_rcu(&key->list);
2569 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002570
2571 return 0;
2572}
2573
/* Remove all Long Term Keys matching the given identity address and
 * address type.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu(),
 * so concurrent RCU readers stay safe.
 *
 * Returns 0 when at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2592
/* Remove all Identity Resolving Keys matching the given identity
 * address and address type.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu(),
 * so concurrent RCU readers stay safe.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2607
Johan Hedberg55e76b32015-03-10 22:34:40 +02002608bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2609{
2610 struct smp_ltk *k;
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002611 struct smp_irk *irk;
Johan Hedberg55e76b32015-03-10 22:34:40 +02002612 u8 addr_type;
2613
2614 if (type == BDADDR_BREDR) {
2615 if (hci_find_link_key(hdev, bdaddr))
2616 return true;
2617 return false;
2618 }
2619
2620 /* Convert to HCI addr type which struct smp_ltk uses */
2621 if (type == BDADDR_LE_PUBLIC)
2622 addr_type = ADDR_LE_DEV_PUBLIC;
2623 else
2624 addr_type = ADDR_LE_DEV_RANDOM;
2625
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002626 irk = hci_get_irk(hdev, bdaddr, addr_type);
2627 if (irk) {
2628 bdaddr = &irk->bdaddr;
2629 addr_type = irk->addr_type;
2630 }
2631
Johan Hedberg55e76b32015-03-10 22:34:40 +02002632 rcu_read_lock();
2633 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg87c8b282015-03-11 08:55:51 +02002634 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2635 rcu_read_unlock();
Johan Hedberg55e76b32015-03-10 22:34:40 +02002636 return true;
Johan Hedberg87c8b282015-03-11 08:55:51 +02002637 }
Johan Hedberg55e76b32015-03-10 22:34:40 +02002638 }
2639 rcu_read_unlock();
2640
2641 return false;
2642}
2643
Ville Tervo6bd32322011-02-16 16:32:41 +02002644/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002645static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002646{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002647 struct hci_dev *hdev = container_of(work, struct hci_dev,
2648 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002649
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002650 if (hdev->sent_cmd) {
2651 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2652 u16 opcode = __le16_to_cpu(sent->opcode);
2653
2654 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2655 } else {
2656 BT_ERR("%s command tx timeout", hdev->name);
2657 }
2658
Ville Tervo6bd32322011-02-16 16:32:41 +02002659 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002660 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002661}
2662
Szymon Janc2763eda2011-03-22 13:12:22 +01002663struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002664 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002665{
2666 struct oob_data *data;
2667
Johan Hedberg6928a922014-10-26 20:46:09 +01002668 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2669 if (bacmp(bdaddr, &data->bdaddr) != 0)
2670 continue;
2671 if (data->bdaddr_type != bdaddr_type)
2672 continue;
2673 return data;
2674 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002675
2676 return NULL;
2677}
2678
Johan Hedberg6928a922014-10-26 20:46:09 +01002679int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2680 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002681{
2682 struct oob_data *data;
2683
Johan Hedberg6928a922014-10-26 20:46:09 +01002684 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002685 if (!data)
2686 return -ENOENT;
2687
Johan Hedberg6928a922014-10-26 20:46:09 +01002688 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002689
2690 list_del(&data->list);
2691 kfree(data);
2692
2693 return 0;
2694}
2695
Johan Hedberg35f74982014-02-18 17:14:32 +02002696void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002697{
2698 struct oob_data *data, *n;
2699
2700 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2701 list_del(&data->list);
2702 kfree(data);
2703 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002704}
2705
/* Store (or update) remote out-of-band pairing data for an address.
 *
 * Either hash/randomizer pair may be NULL; missing pairs are zeroed.
 * The present field encodes which pairs are valid:
 *   0x00 - none, 0x01 - P-192 only, 0x02 - P-256 only, 0x03 - both.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		/* Only the P-192 pair remains valid */
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2751
Florian Grandeld2609b32015-06-18 03:16:34 +02002752/* This function requires the caller holds hdev->lock */
2753struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2754{
2755 struct adv_info *adv_instance;
2756
2757 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2758 if (adv_instance->instance == instance)
2759 return adv_instance;
2760 }
2761
2762 return NULL;
2763}
2764
2765/* This function requires the caller holds hdev->lock */
2766struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2767 struct adv_info *cur_instance;
2768
2769 cur_instance = hci_find_adv_instance(hdev, instance);
2770 if (!cur_instance)
2771 return NULL;
2772
2773 if (cur_instance == list_last_entry(&hdev->adv_instances,
2774 struct adv_info, list))
2775 return list_first_entry(&hdev->adv_instances,
2776 struct adv_info, list);
2777 else
2778 return list_next_entry(cur_instance, list);
2779}
2780
2781/* This function requires the caller holds hdev->lock */
2782int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2783{
2784 struct adv_info *adv_instance;
2785
2786 adv_instance = hci_find_adv_instance(hdev, instance);
2787 if (!adv_instance)
2788 return -ENOENT;
2789
2790 BT_DBG("%s removing %dMR", hdev->name, instance);
2791
Florian Grandel5d900e42015-06-18 03:16:35 +02002792 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2793 cancel_delayed_work(&hdev->adv_instance_expire);
2794 hdev->adv_instance_timeout = 0;
2795 }
2796
Florian Grandeld2609b32015-06-18 03:16:34 +02002797 list_del(&adv_instance->list);
2798 kfree(adv_instance);
2799
2800 hdev->adv_instance_cnt--;
2801
2802 return 0;
2803}
2804
2805/* This function requires the caller holds hdev->lock */
2806void hci_adv_instances_clear(struct hci_dev *hdev)
2807{
2808 struct adv_info *adv_instance, *n;
2809
Florian Grandel5d900e42015-06-18 03:16:35 +02002810 if (hdev->adv_instance_timeout) {
2811 cancel_delayed_work(&hdev->adv_instance_expire);
2812 hdev->adv_instance_timeout = 0;
2813 }
2814
Florian Grandeld2609b32015-06-18 03:16:34 +02002815 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2816 list_del(&adv_instance->list);
2817 kfree(adv_instance);
2818 }
2819
2820 hdev->adv_instance_cnt = 0;
2821}
2822
2823/* This function requires the caller holds hdev->lock */
2824int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2825 u16 adv_data_len, u8 *adv_data,
2826 u16 scan_rsp_len, u8 *scan_rsp_data,
2827 u16 timeout, u16 duration)
2828{
2829 struct adv_info *adv_instance;
2830
2831 adv_instance = hci_find_adv_instance(hdev, instance);
2832 if (adv_instance) {
2833 memset(adv_instance->adv_data, 0,
2834 sizeof(adv_instance->adv_data));
2835 memset(adv_instance->scan_rsp_data, 0,
2836 sizeof(adv_instance->scan_rsp_data));
2837 } else {
2838 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2839 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2840 return -EOVERFLOW;
2841
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002842 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002843 if (!adv_instance)
2844 return -ENOMEM;
2845
Florian Grandelfffd38b2015-06-18 03:16:47 +02002846 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002847 adv_instance->instance = instance;
2848 list_add(&adv_instance->list, &hdev->adv_instances);
2849 hdev->adv_instance_cnt++;
2850 }
2851
2852 adv_instance->flags = flags;
2853 adv_instance->adv_data_len = adv_data_len;
2854 adv_instance->scan_rsp_len = scan_rsp_len;
2855
2856 if (adv_data_len)
2857 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2858
2859 if (scan_rsp_len)
2860 memcpy(adv_instance->scan_rsp_data,
2861 scan_rsp_data, scan_rsp_len);
2862
2863 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002864 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002865
2866 if (duration == 0)
2867 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2868 else
2869 adv_instance->duration = duration;
2870
2871 BT_DBG("%s for %dMR", hdev->name, instance);
2872
2873 return 0;
2874}
2875
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002876struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002877 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002878{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002879 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002880
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002881 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002882 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002883 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002884 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002885
2886 return NULL;
2887}
2888
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002889void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002890{
2891 struct list_head *p, *n;
2892
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002893 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002894 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002895
2896 list_del(p);
2897 kfree(b);
2898 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002899}
2900
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002901int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002902{
2903 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002904
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002905 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002906 return -EBADF;
2907
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002908 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002909 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002910
Johan Hedberg27f70f32014-07-21 10:50:06 +03002911 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002912 if (!entry)
2913 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002914
2915 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002916 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002917
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002918 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002919
2920 return 0;
2921}
2922
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002923int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002924{
2925 struct bdaddr_list *entry;
2926
Johan Hedberg35f74982014-02-18 17:14:32 +02002927 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002928 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002929 return 0;
2930 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002931
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002932 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002933 if (!entry)
2934 return -ENOENT;
2935
2936 list_del(&entry->list);
2937 kfree(entry);
2938
2939 return 0;
2940}
2941
Andre Guedes15819a72014-02-03 13:56:18 -03002942/* This function requires the caller holds hdev->lock */
2943struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2944 bdaddr_t *addr, u8 addr_type)
2945{
2946 struct hci_conn_params *params;
2947
2948 list_for_each_entry(params, &hdev->le_conn_params, list) {
2949 if (bacmp(&params->addr, addr) == 0 &&
2950 params->addr_type == addr_type) {
2951 return params;
2952 }
2953 }
2954
2955 return NULL;
2956}
2957
2958/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002959struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2960 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002961{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002962 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002963
Johan Hedberg501f8822014-07-04 12:37:26 +03002964 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002965 if (bacmp(&param->addr, addr) == 0 &&
2966 param->addr_type == addr_type)
2967 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002968 }
2969
2970 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002971}
2972
2973/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002974struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2975 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002976{
2977 struct hci_conn_params *params;
2978
2979 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002980 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002981 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002982
2983 params = kzalloc(sizeof(*params), GFP_KERNEL);
2984 if (!params) {
2985 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002986 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002987 }
2988
2989 bacpy(&params->addr, addr);
2990 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002991
2992 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002993 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002994
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002995 params->conn_min_interval = hdev->le_conn_min_interval;
2996 params->conn_max_interval = hdev->le_conn_max_interval;
2997 params->conn_latency = hdev->le_conn_latency;
2998 params->supervision_timeout = hdev->le_supv_timeout;
2999 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3000
3001 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3002
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003003 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003004}
3005
Johan Hedbergf6c63242014-08-15 21:06:59 +03003006static void hci_conn_params_free(struct hci_conn_params *params)
3007{
3008 if (params->conn) {
3009 hci_conn_drop(params->conn);
3010 hci_conn_put(params->conn);
3011 }
3012
3013 list_del(&params->action);
3014 list_del(&params->list);
3015 kfree(params);
3016}
3017
Andre Guedes15819a72014-02-03 13:56:18 -03003018/* This function requires the caller holds hdev->lock */
3019void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3020{
3021 struct hci_conn_params *params;
3022
3023 params = hci_conn_params_lookup(hdev, addr, addr_type);
3024 if (!params)
3025 return;
3026
Johan Hedbergf6c63242014-08-15 21:06:59 +03003027 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003028
Johan Hedberg95305ba2014-07-04 12:37:21 +03003029 hci_update_background_scan(hdev);
3030
Andre Guedes15819a72014-02-03 13:56:18 -03003031 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3032}
3033
/* Drop every connection parameter entry whose auto_connect policy is
 * HCI_AUTO_CONN_DISABLED, except those with a pending explicit connect,
 * which are downgraded to one-shot (HCI_AUTO_CONN_EXPLICIT) instead.
 *
 * This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
3057
3058/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003059void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003060{
3061 struct hci_conn_params *params, *tmp;
3062
Johan Hedbergf6c63242014-08-15 21:06:59 +03003063 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3064 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003065
Johan Hedberga2f41a82014-07-04 12:37:19 +03003066 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003067
Andre Guedes15819a72014-02-03 13:56:18 -03003068 BT_DBG("All LE connection parameters were removed");
3069}
3070
Marcel Holtmann1904a852015-01-11 13:50:44 -08003071static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003072{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003073 if (status) {
3074 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003075
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003076 hci_dev_lock(hdev);
3077 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3078 hci_dev_unlock(hdev);
3079 return;
3080 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003081}
3082
/* Request-complete callback for the LE-scan-disable request. Depending
 * on the active discovery type this either stops discovery (LE-only)
 * or, for interleaved discovery without controller support for
 * simultaneous scanning, kicks off the BR/EDR inquiry phase.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	/* The scan is no longer running, so forget its start time. */
	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished. If we will resolve
			 * remote device name, do not change discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			/* LE phase done: start the BR/EDR inquiry phase
			 * of interleaved discovery.
			 */
			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3145
/* Delayed work that turns off an ongoing LE scan by submitting an HCI
 * request; the follow-up logic runs in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Make sure no scan-restart is in flight or queued while we are
	 * disabling the scan (sync: wait for a running instance).
	 */
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3165
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003166static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3167 u16 opcode)
3168{
3169 unsigned long timeout, duration, scan_start, now;
3170
3171 BT_DBG("%s", hdev->name);
3172
3173 if (status) {
3174 BT_ERR("Failed to restart LE scan: status %d", status);
3175 return;
3176 }
3177
3178 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3179 !hdev->discovery.scan_start)
3180 return;
3181
3182 /* When the scan was started, hdev->le_scan_disable has been queued
3183 * after duration from scan_start. During scan restart this job
3184 * has been canceled, and we need to queue it again after proper
3185 * timeout, to make sure that scan does not run indefinitely.
3186 */
3187 duration = hdev->discovery.scan_duration;
3188 scan_start = hdev->discovery.scan_start;
3189 now = jiffies;
3190 if (now - scan_start <= duration) {
3191 int elapsed;
3192
3193 if (now >= scan_start)
3194 elapsed = now - scan_start;
3195 else
3196 elapsed = ULONG_MAX - scan_start + now;
3197
3198 timeout = duration - elapsed;
3199 } else {
3200 timeout = 0;
3201 }
3202 queue_delayed_work(hdev->workqueue,
3203 &hdev->le_scan_disable, timeout);
3204}
3205
/* Delayed work that restarts an active LE scan by chaining a scan
 * disable and a scan enable into one HCI request; the disable timer is
 * re-armed in le_scan_restart_work_complete().
 */
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	/* Disable first, then re-enable, within the same request. */
	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}
3233
Johan Hedberga1f4c312014-02-27 14:05:41 +02003234/* Copy the Identity Address of the controller.
3235 *
3236 * If the controller has a public BD_ADDR, then by default use that one.
3237 * If this is a LE only controller without a public address, default to
3238 * the static random address.
3239 *
3240 * For debugging purposes it is possible to force controllers with a
3241 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003242 *
3243 * In case BR/EDR has been disabled on a dual-mode controller and
3244 * userspace has configured a static address, then that address
3245 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02003246 */
3247void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3248 u8 *bdaddr_type)
3249{
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07003250 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003251 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003252 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003253 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02003254 bacpy(bdaddr, &hdev->static_addr);
3255 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3256 } else {
3257 bacpy(bdaddr, &hdev->bdaddr);
3258 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3259 }
3260}
3261
/* Alloc HCI device.
 *
 * Allocates a zeroed hci_dev and initializes its default parameters,
 * lists, work items and queues. Returns NULL on allocation failure.
 * The returned device is released through hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default BR/EDR packet types and capabilities. */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE parameters (values in controller units). */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device bookkeeping lists. */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	/* Deferred work for RX/TX/command processing and power handling. */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3351
/* Free HCI device.
 *
 * Drops the device reference; the actual hci_dev memory is released by
 * the driver-model release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3359
/* Register HCI device.
 *
 * Allocates an index, creates the per-device workqueues, registers the
 * device with the driver model and rfkill, and queues the initial
 * power-on work. Returns the new device id (>= 0) on success or a
 * negative errno on failure.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* The driver must provide the minimal transport callbacks. */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is not fatal; the device simply
	 * operates without an rfkill switch.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3463
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * closes it, tears down rfkill/sysfs/debugfs/workqueues, clears all
 * per-device state and finally drops the registration reference and
 * frees the index. The teardown order matters; do not reorder.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Remember the id: hdev may be freed by the final hci_dev_put()
	 * below, before ida_simple_remove() runs.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush every per-device list and key store under the lock. */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3528
3529/* Suspend HCI device */
3530int hci_suspend_dev(struct hci_dev *hdev)
3531{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003532 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533 return 0;
3534}
3535EXPORT_SYMBOL(hci_suspend_dev);
3536
3537/* Resume HCI device */
3538int hci_resume_dev(struct hci_dev *hdev)
3539{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003540 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541 return 0;
3542}
3543EXPORT_SYMBOL(hci_resume_dev);
3544
Marcel Holtmann75e05692014-11-02 08:15:38 +01003545/* Reset HCI device */
3546int hci_reset_dev(struct hci_dev *hdev)
3547{
3548 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3549 struct sk_buff *skb;
3550
3551 skb = bt_skb_alloc(3, GFP_ATOMIC);
3552 if (!skb)
3553 return -ENOMEM;
3554
3555 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3556 memcpy(skb_put(skb, 3), hw_err, 3);
3557
3558 /* Send Hardware Error to upper stack */
3559 return hci_recv_frame(hdev, skb);
3560}
3561EXPORT_SYMBOL(hci_reset_dev);
3562
Marcel Holtmann76bca882009-11-18 00:40:39 +01003563/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003564int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003565{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003566 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003567 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003568 kfree_skb(skb);
3569 return -ENXIO;
3570 }
3571
Marcel Holtmannfe806dc2015-10-08 03:14:28 +02003572 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3573 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3574 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3575 kfree_skb(skb);
3576 return -EINVAL;
3577 }
3578
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003579 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003580 bt_cb(skb)->incoming = 1;
3581
3582 /* Time stamp */
3583 __net_timestamp(skb);
3584
Marcel Holtmann76bca882009-11-18 00:40:39 +01003585 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003586 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003587
Marcel Holtmann76bca882009-11-18 00:40:39 +01003588 return 0;
3589}
3590EXPORT_SYMBOL(hci_recv_frame);
3591
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003592/* Receive diagnostic message from HCI drivers */
3593int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3594{
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003595 /* Mark as diagnostic packet */
3596 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3597
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003598 /* Time stamp */
3599 __net_timestamp(skb);
3600
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003601 skb_queue_tail(&hdev->rx_q, skb);
3602 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003603
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003604 return 0;
3605}
3606EXPORT_SYMBOL(hci_recv_diag);
3607
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608/* ---- Interface to upper protocols ---- */
3609
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610int hci_register_cb(struct hci_cb *cb)
3611{
3612 BT_DBG("%p name %s", cb, cb->name);
3613
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003614 mutex_lock(&hci_cb_list_lock);
Johan Hedberg00629e02015-02-18 14:53:54 +02003615 list_add_tail(&cb->list, &hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003616 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617
3618 return 0;
3619}
3620EXPORT_SYMBOL(hci_register_cb);
3621
3622int hci_unregister_cb(struct hci_cb *cb)
3623{
3624 BT_DBG("%p name %s", cb, cb->name);
3625
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003626 mutex_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003627 list_del(&cb->list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003628 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003629
3630 return 0;
3631}
3632EXPORT_SYMBOL(hci_unregister_cb);
3633
/* Hand one outgoing frame to the driver. The skb is always consumed:
 * it is either passed to the driver's send callback or freed here.
 * A copy is mirrored to the monitor socket, and to HCI sockets when
 * the device is in promiscuous mode.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop the frame once the driver has been closed */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		/* On driver failure the skb was not consumed; free it */
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
3665
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003666/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003667int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3668 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003669{
3670 struct sk_buff *skb;
3671
3672 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3673
3674 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3675 if (!skb) {
3676 BT_ERR("%s no memory for command", hdev->name);
3677 return -ENOMEM;
3678 }
3679
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003680 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003681 * single-command requests.
3682 */
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01003683 bt_cb(skb)->hci.req_start = true;
Johan Hedberg11714b32013-03-05 20:37:47 +02003684
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003686 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687
3688 return 0;
3689}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690
3691/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003692void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693{
3694 struct hci_command_hdr *hdr;
3695
3696 if (!hdev->sent_cmd)
3697 return NULL;
3698
3699 hdr = (void *) hdev->sent_cmd->data;
3700
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003701 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702 return NULL;
3703
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003704 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705
3706 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3707}
3708
Loic Poulainfbef1682015-09-29 15:05:44 +02003709/* Send HCI command and wait for command commplete event */
3710struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3711 const void *param, u32 timeout)
3712{
3713 struct sk_buff *skb;
3714
3715 if (!test_bit(HCI_UP, &hdev->flags))
3716 return ERR_PTR(-ENETDOWN);
3717
3718 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3719
3720 hci_req_lock(hdev);
3721 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3722 hci_req_unlock(hdev);
3723
3724 return skb;
3725}
3726EXPORT_SYMBOL(hci_cmd_sync);
3727
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728/* Send ACL data */
3729static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3730{
3731 struct hci_acl_hdr *hdr;
3732 int len = skb->len;
3733
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003734 skb_push(skb, HCI_ACL_HDR_SIZE);
3735 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003736 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003737 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3738 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739}
3740
/* Prepare an (possibly fragmented) ACL skb and queue it on the channel's
 * data queue: fix up lengths, set the packet type, add the ACL header to
 * every fragment (ACL_START on the head, ACL_CONT on the rest) and queue
 * all fragments as one atomic unit.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* The head skb covers only its own linear data; fragments carry
	 * the rest via frag_list.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* BR/EDR uses the connection handle, AMP the channel handle */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments never carry the ACL_START flag */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3802
3803void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3804{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003805 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003806
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003807 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003808
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003809 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003811 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813
3814/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003815void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816{
3817 struct hci_dev *hdev = conn->hdev;
3818 struct hci_sco_hdr hdr;
3819
3820 BT_DBG("%s len %d", hdev->name, skb->len);
3821
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003822 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823 hdr.dlen = skb->len;
3824
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003825 skb_push(skb, HCI_SCO_HDR_SIZE);
3826 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003827 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003829 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003830
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003832 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834
3835/* ---- HCI TX task (outgoing data) ---- */
3836
/* HCI Connection scheduler */
/* Pick the connection of the given link type that has queued data and the
 * fewest frames in flight (simple fairness), and compute its TX quota from
 * the controller's free buffer count for that type. *quote is set to 0 and
 * NULL returned when no eligible connection exists.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Remember the eligible connection with the least in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffer credits for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares ACL buffers when no dedicated LE buffers exist */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split credits evenly between contenders; at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003898static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899{
3900 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003901 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003902
Ville Tervobae1f5d92011-02-10 22:38:53 -03003903 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003905 rcu_read_lock();
3906
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003908 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003909 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003910 BT_ERR("%s killing stalled connection %pMR",
3911 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003912 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003913 }
3914 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003915
3916 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917}
3918
/* Channel-level scheduler: among all channels of connections of the given
 * link type, select the one whose head skb has the highest priority; ties
 * are broken in favour of the connection with the fewest frames in flight.
 * Computes the TX quota from the free buffer credits of the link type.
 * Returns NULL when no channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Lower-priority channels never beat the current best */
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the fairness race */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on least frames in flight */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffer credits for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE shares ACL buffers when no dedicated LE buffers exist */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split credits evenly between contenders; at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4000
/* Anti-starvation pass run after a scheduling round: reset the per-round
 * sent counter of channels that transmitted, and promote the head skb of
 * channels that did not get to send to HCI_PRIO_MAX - 1 so they win the
 * next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) the promotion priority */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4050
/* Number of controller buffer blocks this packet occupies: payload size
 * (skb length minus the ACL header) divided by the block length, rounded up.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
4056
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004057static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004059 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060 /* ACL tx timeout must be longer than maximum
4061 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004062 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004063 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004064 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004066}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067
/* Schedule ACL data in packet-based flow control mode: repeatedly pick the
 * best channel and send up to its quota of frames while buffer credits
 * remain, then rebalance priorities if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link when no ACL credits are left */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One credit consumed; account on channel and connection */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: promote starved channels for the next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4105
/* Schedule ACL data in block-based flow control mode: like
 * hci_sched_acl_pkt() but credits are buffer blocks, and a packet larger
 * than the remaining block budget aborts the round.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	/* Detect a stalled link when no block credits are left */
	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links here, BR/EDR ones ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet does not fit in the remaining block budget */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Account consumed blocks on budget, channel, connection */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: promote starved channels for the next round */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4159
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004160static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004161{
4162 BT_DBG("%s", hdev->name);
4163
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004164 /* No ACL link over BR/EDR controller */
4165 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4166 return;
4167
4168 /* No AMP link over AMP controller */
4169 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004170 return;
4171
4172 switch (hdev->flow_ctl_mode) {
4173 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4174 hci_sched_acl_pkt(hdev);
4175 break;
4176
4177 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4178 hci_sched_acl_blk(hdev);
4179 break;
4180 }
4181}
4182
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004184static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185{
4186 struct hci_conn *conn;
4187 struct sk_buff *skb;
4188 int quote;
4189
4190 BT_DBG("%s", hdev->name);
4191
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004192 if (!hci_conn_num(hdev, SCO_LINK))
4193 return;
4194
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4196 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4197 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004198 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004199
4200 conn->sent++;
4201 if (conn->sent == ~0)
4202 conn->sent = 0;
4203 }
4204 }
4205}
4206
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004207static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004208{
4209 struct hci_conn *conn;
4210 struct sk_buff *skb;
4211 int quote;
4212
4213 BT_DBG("%s", hdev->name);
4214
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004215 if (!hci_conn_num(hdev, ESCO_LINK))
4216 return;
4217
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004218 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4219 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004220 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4221 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004222 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004223
4224 conn->sent++;
4225 if (conn->sent == ~0)
4226 conn->sent = 0;
4227 }
4228 }
4229}
4230
/* Schedule LE data. LE either has its own buffer pool (le_pkts/le_cnt) or,
 * on controllers without dedicated LE buffers, borrows from the ACL pool;
 * the consumed credits are written back to the matching counter at the end.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use dedicated LE credits when available, ACL credits otherwise */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			/* One credit consumed; account on channel and connection */
			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: promote starved channels for the next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4281
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004282static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004284 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285 struct sk_buff *skb;
4286
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004287 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004288 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004289
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004290 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann52de5992013-09-03 18:08:38 -07004291 /* Schedule queues and send stuff to HCI driver */
4292 hci_sched_acl(hdev);
4293 hci_sched_sco(hdev);
4294 hci_sched_esco(hdev);
4295 hci_sched_le(hdev);
4296 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004297
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298 /* Send next queued raw (unknown type) packet */
4299 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004300 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301}
4302
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004303/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304
4305/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004306static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307{
4308 struct hci_acl_hdr *hdr = (void *) skb->data;
4309 struct hci_conn *conn;
4310 __u16 handle, flags;
4311
4312 skb_pull(skb, HCI_ACL_HDR_SIZE);
4313
4314 handle = __le16_to_cpu(hdr->handle);
4315 flags = hci_flags(handle);
4316 handle = hci_handle(handle);
4317
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004318 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004319 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320
4321 hdev->stat.acl_rx++;
4322
4323 hci_dev_lock(hdev);
4324 conn = hci_conn_hash_lookup_handle(hdev, handle);
4325 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004326
Linus Torvalds1da177e2005-04-16 15:20:36 -07004327 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004328 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004329
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004331 l2cap_recv_acldata(conn, skb, flags);
4332 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004334 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004335 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336 }
4337
4338 kfree_skb(skb);
4339}
4340
4341/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004342static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343{
4344 struct hci_sco_hdr *hdr = (void *) skb->data;
4345 struct hci_conn *conn;
4346 __u16 handle;
4347
4348 skb_pull(skb, HCI_SCO_HDR_SIZE);
4349
4350 handle = __le16_to_cpu(hdr->handle);
4351
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004352 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353
4354 hdev->stat.sco_rx++;
4355
4356 hci_dev_lock(hdev);
4357 conn = hci_conn_hash_lookup_handle(hdev, handle);
4358 hci_dev_unlock(hdev);
4359
4360 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004361 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004362 sco_recv_scodata(conn, skb);
4363 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004365 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004366 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367 }
4368
4369 kfree_skb(skb);
4370}
4371
Johan Hedberg9238f362013-03-05 20:37:48 +02004372static bool hci_req_is_complete(struct hci_dev *hdev)
4373{
4374 struct sk_buff *skb;
4375
4376 skb = skb_peek(&hdev->cmd_q);
4377 if (!skb)
4378 return true;
4379
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004380 return bt_cb(skb)->hci.req_start;
Johan Hedberg9238f362013-03-05 20:37:48 +02004381}
4382
Johan Hedberg42c6b122013-03-05 20:37:49 +02004383static void hci_resend_last(struct hci_dev *hdev)
4384{
4385 struct hci_command_hdr *sent;
4386 struct sk_buff *skb;
4387 u16 opcode;
4388
4389 if (!hdev->sent_cmd)
4390 return;
4391
4392 sent = (void *) hdev->sent_cmd->data;
4393 opcode = __le16_to_cpu(sent->opcode);
4394 if (opcode == HCI_OP_RESET)
4395 return;
4396
4397 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4398 if (!skb)
4399 return;
4400
4401 skb_queue_head(&hdev->cmd_q, skb);
4402 queue_work(hdev->workqueue, &hdev->cmd_work);
4403}
4404
/* Handle completion (or failure) of an HCI command that may be part of
 * a multi-command request.
 *
 * @opcode/@status: the opcode and status from the controller's event.
 * @req_complete / @req_complete_skb: out-parameters; on request
 * completion exactly one is filled in with the request's completion
 * callback (the skb variant additionally receives the event skb at the
 * call site).  Left untouched when the request is not yet complete.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	/* Otherwise (a command failed mid-request): flush the rest of the
	 * request from cmd_q, stopping at the start of the next request.
	 * irqsave because cmd_q is also touched from non-process context.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_start) {
			/* Head of the NEXT request: put it back and stop */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Each queued command carries the request's callbacks;
		 * keep the last seen ones as the request's completion.
		 */
		*req_complete = bt_cb(skb)->hci.req_complete;
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4464
/* RX work callback: drain hdev->rx_q and dispatch each packet.
 *
 * Every packet is mirrored to the HCI monitor, optionally copied to
 * listening sockets, and then either dropped (user channel / during
 * init for data packets) or demultiplexed by packet type to the
 * event/ACL/SCO handlers.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* A user channel owns the device exclusively: the kernel
		 * stack must not process anything beyond the mirroring above.
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		/* Handlers below take ownership of skb (they free or
		 * forward it); only unknown types are freed here.
		 */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4519
/* CMD work callback: send the next queued HCI command if the
 * controller has a command credit available (cmd_cnt > 0).
 *
 * A clone of the sent command is kept in hdev->sent_cmd so the
 * completion path (hci_req_cmd_complete) can match it against the
 * controller's response and retrieve its callbacks.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously saved command before replacing it */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset the response timer is suppressed;
			 * otherwise (re)arm the command timeout watchdog.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}