/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

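/* Usage sketch (editor's illustration, not part of the original file):
 * with debugfs mounted at /sys/kernel/debug, Device Under Test mode can
 * be toggled from userspace for a controller named hci0:
 *
 *      # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      Y
 *
 * Enabling sends HCI_OP_ENABLE_DUT_MODE via dut_mode_write() above;
 * writing N issues HCI_OP_RESET to leave test mode.
 */
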
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active, then there is no need for the vendor callback.
         *
         * Instead just store the desired value. If needed the setting
         * will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !test_bit(HCI_RUNNING, &hdev->flags))
                goto done;

        hci_req_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

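/* Illustrative note (not part of the original file): the vendor_diag
 * entry only exists for drivers that provide a set_diag() callback, and
 * is driven the same way as dut_mode:
 *
 *      # echo Y > /sys/kernel/debug/bluetooth/hci0/vendor_diag
 *
 * With HCI_QUIRK_NON_PERSISTENT_DIAG set and the transport down, the
 * value is only cached and gets programmed at the next power-on.
 */
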
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

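/* Illustrative caller sketch (not part of the original file), modelled
 * on dut_mode_write() above: a synchronous command round-trip returns
 * the Command Complete skb, which the caller owns and must free.
 *
 *      struct sk_buff *skb;
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 */
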
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

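/* Illustrative sketch (not part of the original file): callers hand in a
 * request-builder callback, in the style of hci_scan_req() below, and
 * block until hci_req_sync_complete() fires or the timeout expires.
 *
 *      static void example_scan_req(struct hci_request *req,
 *                                   unsigned long opt)
 *      {
 *              __u8 scan = opt;
 *
 *              hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *      }
 *
 *      err = hci_req_sync(hdev, example_scan_req, SCAN_PAGE,
 *                         HCI_CMD_TIMEOUT);
 */
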
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10; /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20; /* LE Remote Connection
                                            * Parameter Request
                                            */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40; /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04; /* LE Direct Advertising
                                            * Report
                                            */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02; /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01; /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04; /* LE Connection Update
                                            * Complete
                                            */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08; /* LE Read Remote Used
                                            * Features Complete
                                            */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80; /* LE Read Local P-256
                                            * Public Key Complete
                                            */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01; /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

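/* Illustrative usage (not part of the original file): the reference taken
 * via hci_dev_hold() inside hci_dev_get() must be balanced with a
 * hci_dev_put() once the caller is done with the device.
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *
 *      if (!hdev)
 *              return -ENODEV;
 *      ...
 *      hci_dev_put(hdev);
 */
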
1114/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001115
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001116bool hci_discovery_active(struct hci_dev *hdev)
1117{
1118 struct discovery_state *discov = &hdev->discovery;
1119
Andre Guedes6fbe1952012-02-03 17:47:58 -03001120 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001121 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001122 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001123 return true;
1124
Andre Guedes6fbe1952012-02-03 17:47:58 -03001125 default:
1126 return false;
1127 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001128}
1129
Johan Hedbergff9ef572012-01-04 14:23:45 +02001130void hci_discovery_set_state(struct hci_dev *hdev, int state)
1131{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001132 int old_state = hdev->discovery.state;
1133
Johan Hedbergff9ef572012-01-04 14:23:45 +02001134 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1135
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001136 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001137 return;
1138
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001139 hdev->discovery.state = state;
1140
Johan Hedbergff9ef572012-01-04 14:23:45 +02001141 switch (state) {
1142 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001143 hci_update_background_scan(hdev);
1144
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001145 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001146 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001147 break;
1148 case DISCOVERY_STARTING:
1149 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001150 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001151 mgmt_discovering(hdev, 1);
1152 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001153 case DISCOVERY_RESOLVING:
1154 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001155 case DISCOVERY_STOPPING:
1156 break;
1157 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001158}
1159
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001160void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161{
Johan Hedberg30883512012-01-04 14:16:21 +02001162 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001163 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164
Johan Hedberg561aafb2012-01-04 13:31:59 +02001165 list_for_each_entry_safe(p, n, &cache->all, all) {
1166 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001167 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001169
1170 INIT_LIST_HEAD(&cache->unknown);
1171 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172}
1173
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001174struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1175 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176{
Johan Hedberg30883512012-01-04 14:16:21 +02001177 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 struct inquiry_entry *e;
1179
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001180 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181
Johan Hedberg561aafb2012-01-04 13:31:59 +02001182 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001184 return e;
1185 }
1186
1187 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188}
1189
Johan Hedberg561aafb2012-01-04 13:31:59 +02001190struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001191 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001192{
Johan Hedberg30883512012-01-04 14:16:21 +02001193 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001194 struct inquiry_entry *e;
1195
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001196 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001197
1198 list_for_each_entry(e, &cache->unknown, list) {
1199 if (!bacmp(&e->data.bdaddr, bdaddr))
1200 return e;
1201 }
1202
1203 return NULL;
1204}
1205
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001206struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001207 bdaddr_t *bdaddr,
1208 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001209{
1210 struct discovery_state *cache = &hdev->discovery;
1211 struct inquiry_entry *e;
1212
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001213 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001214
1215 list_for_each_entry(e, &cache->resolve, list) {
1216 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1217 return e;
1218 if (!bacmp(&e->data.bdaddr, bdaddr))
1219 return e;
1220 }
1221
1222 return NULL;
1223}
1224
Johan Hedberga3d4e202012-01-09 00:53:02 +02001225void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001226 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001227{
1228 struct discovery_state *cache = &hdev->discovery;
1229 struct list_head *pos = &cache->resolve;
1230 struct inquiry_entry *p;
1231
1232 list_del(&ie->list);
1233
1234 list_for_each_entry(p, &cache->resolve, list) {
1235 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001236 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001237 break;
1238 pos = &p->list;
1239 }
1240
1241 list_add(&ie->list, pos);
1242}
1243
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

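/* Illustrative sketch of how a caller can consume the flags returned
 * above; the actual mgmt event plumbing lives in the event handling
 * code and may differ in detail:
 *
 *	u32 flags = hci_inquiry_cache_update(hdev, &data, name_known);
 *
 *	if (flags & MGMT_DEV_FOUND_CONFIRM_NAME)
 *		;	// ask userspace to confirm/resolve the name
 *	if (flags & MGMT_DEV_FOUND_LEGACY_PAIRING)
 *		;	// remote lacks SSP, pairing will need a PIN
 */
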
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* inquiry_cache_dump() can't sleep, so allocate a temporary
	 * buffer and copy it to user space afterwards.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

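/* Illustrative userspace sketch for the HCIINQUIRY ioctl above,
 * assuming a BlueZ-style raw HCI socket; device id, buffer size and
 * the GIAC LAP value are example choices, not a quote of any tool:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },  // GIAC
 *			.length  = 8,	// inquiry length, 1.28 s units
 *			.num_rsp = 8,
 *		},
 *	};
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (dd >= 0 && ioctl(dd, HCIINQUIRY, (unsigned long) &req) == 0)
 *		printf("%d device(s) found\n", req.ir.num_rsp);
 */
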
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * has completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

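/* Illustrative sketch, assuming the usual socket-layer dispatch: the
 * function above and hci_dev_close() further down back the HCIDEVUP
 * and HCIDEVDOWN ioctls, which a tool like hciconfig issues roughly
 * as follows (error handling here is an example, not a quote):
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("Can't init hci0");
 *	...
 *	if (ioctl(ctl, HCIDEVDOWN, 0) < 0)
 *		perror("Can't shut down hci0");
 */
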
/* This function requires that the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty and no tasks are
	 * scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

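/* Summary of the mapping above: the raw scan mode written via
 * HCISETSCAN is mirrored into the mgmt-visible flags, roughly
 *
 *	SCAN_DISABLED            -> !CONNECTABLE, !DISCOVERABLE
 *	SCAN_PAGE                ->  CONNECTABLE, !DISCOVERABLE
 *	SCAN_PAGE | SCAN_INQUIRY ->  CONNECTABLE,  DISCOVERABLE
 *
 * and mgmt listeners only receive a New Settings event when one of
 * the flags actually changed.
 */
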
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

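/* Illustrative userspace counterpart for the HCISETSCAN branch above;
 * this is roughly what "hciconfig hci0 piscan" ends up doing (the
 * surrounding socket setup is assumed, as in the earlier sketches):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */
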
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

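/* Sketch of calling HCIGETDEVLIST from userspace (an illustration,
 * not a quote of any tool); note that dev_num is both input (the
 * capacity of the trailing array) and output (entries filled in):
 *
 *	struct hci_dev_list_req *dl;
 *	int i, max = 16;
 *
 *	dl = calloc(1, sizeof(*dl) + max * sizeof(struct hci_dev_req));
 *	dl->dev_num = max;
 *	if (ioctl(ctl, HCIGETDEVLIST, (unsigned long) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n",
 *			       dl->dev_req[i].dev_id,
 *			       dl->dev_req[i].dev_opt);
 */
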
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

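/* Note on the di.type encoding above: the low nibble carries the
 * transport bus and the next two bits the controller type, so with
 * the usual constants a USB BR/EDR controller (bus HCI_USB == 1,
 * dev_type HCI_BREDR == 0) reports di.type == 0x01, which userspace
 * splits back apart as (di.type & 0x0f) and ((di.type & 0x30) >> 4).
 */
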
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

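/* These ops are wired up during device registration; as an assumption
 * about code outside this excerpt, hci_register_dev() allocates the
 * switch roughly as
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *
 * so block/unblock requests from the rfkill subsystem land in
 * hci_rfkill_set_block() above.
 */
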
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

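/* Note on the three key-clearing helpers above: entries are unlinked
 * with list_del_rcu() and freed with kfree_rcu() so that lockless
 * readers such as hci_find_link_key() below, which walk the same
 * lists under rcu_read_lock(), never dereference freed memory. The
 * write side is assumed to be serialized by hdev->lock.
 */
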
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

Johan Hedberg567fa2a2014-06-24 13:15:48 +03002473struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002474 bdaddr_t *bdaddr, u8 *val, u8 type,
2475 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002476{
2477 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302478 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002479
2480 old_key = hci_find_link_key(hdev, bdaddr);
2481 if (old_key) {
2482 old_key_type = old_key->type;
2483 key = old_key;
2484 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002485 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002486 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002487 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002488 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02002489 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002490 }
2491
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002492 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002493
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002494 /* Some buggy controller combinations generate a changed
2495 * combination key for legacy pairing even when there's no
2496 * previous key */
2497 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002498 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002499 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002500 if (conn)
2501 conn->key_type = type;
2502 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002503
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002504 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002505 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002506 key->pin_len = pin_len;
2507
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002508 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002509 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002510 else
2511 key->type = type;
2512
Johan Hedberg7652ff62014-06-24 13:15:49 +03002513 if (persistent)
2514 *persistent = hci_persistent_key(hdev, conn, type,
2515 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002516
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002517 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002518}
2519
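/* Example (illustrative sketch, simplified from the real call site in
 * hci_event.c): the HCI Link Key Notification handler stores the new key
 * and uses the persistent flag to decide whether user space should be
 * asked to keep it across reboots:
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 */
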
Johan Hedbergca9142b2014-02-19 14:57:44 +02002520struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002521 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002522 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002523{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002524 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002525 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002526
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002527 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002528 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002529 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002530 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002531 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002532 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002533 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002534 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002535 }
2536
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002537 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002538 key->bdaddr_type = addr_type;
2539 memcpy(key->val, tk, sizeof(key->val));
2540 key->authenticated = authenticated;
2541 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002542 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002543 key->enc_size = enc_size;
2544 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002545
Johan Hedbergca9142b2014-02-19 14:57:44 +02002546 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002547}
2548
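/* Example (illustrative): for legacy SMP pairing the EDIV/Rand pair
 * identifies the key on the remote side, while LE Secure Connections
 * keys carry zero for both. Storing an SC key could therefore look like:
 *
 *	key = hci_add_ltk(hdev, &conn->dst, conn->dst_type, SMP_LTK_P256,
 *			  authenticated, tk, enc_size, 0, 0);
 *
 * (SMP_LTK_P256 comes from smp.h; the surrounding variables are
 * hypothetical.)
 */
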
Johan Hedbergca9142b2014-02-19 14:57:44 +02002549struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2550 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002551{
2552 struct smp_irk *irk;
2553
2554 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2555 if (!irk) {
2556 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2557 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002558 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002559
2560 bacpy(&irk->bdaddr, bdaddr);
2561 irk->addr_type = addr_type;
2562
Johan Hedbergadae20c2014-11-13 14:37:48 +02002563 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002564 }
2565
2566 memcpy(irk->val, val, 16);
2567 bacpy(&irk->rpa, rpa);
2568
Johan Hedbergca9142b2014-02-19 14:57:44 +02002569 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002570}
2571
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002572int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2573{
2574 struct link_key *key;
2575
2576 key = hci_find_link_key(hdev, bdaddr);
2577 if (!key)
2578 return -ENOENT;
2579
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002580 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002581
Johan Hedberg0378b592014-11-19 15:22:22 +02002582 list_del_rcu(&key->list);
2583 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002584
2585 return 0;
2586}
2587
Johan Hedberge0b2b272014-02-18 17:14:31 +02002588int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002589{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002590 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002591 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002592
Johan Hedberg970d0f12014-11-13 14:37:47 +02002593 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002594 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002595 continue;
2596
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002597 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002598
Johan Hedberg970d0f12014-11-13 14:37:47 +02002599 list_del_rcu(&k->list);
2600 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002601 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002602 }
2603
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002604 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002605}
2606
Johan Hedberga7ec7332014-02-18 17:14:35 +02002607void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2608{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002609 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002610
Johan Hedbergadae20c2014-11-13 14:37:48 +02002611 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002612 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2613 continue;
2614
2615 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2616
Johan Hedbergadae20c2014-11-13 14:37:48 +02002617 list_del_rcu(&k->list);
2618 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002619 }
2620}
2621
Johan Hedberg55e76b32015-03-10 22:34:40 +02002622bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2623{
2624 struct smp_ltk *k;
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002625 struct smp_irk *irk;
Johan Hedberg55e76b32015-03-10 22:34:40 +02002626 u8 addr_type;
2627
2628 if (type == BDADDR_BREDR) {
2629 if (hci_find_link_key(hdev, bdaddr))
2630 return true;
2631 return false;
2632 }
2633
2634 /* Convert to HCI addr type which struct smp_ltk uses */
2635 if (type == BDADDR_LE_PUBLIC)
2636 addr_type = ADDR_LE_DEV_PUBLIC;
2637 else
2638 addr_type = ADDR_LE_DEV_RANDOM;
2639
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002640 irk = hci_get_irk(hdev, bdaddr, addr_type);
2641 if (irk) {
2642 bdaddr = &irk->bdaddr;
2643 addr_type = irk->addr_type;
2644 }
2645
Johan Hedberg55e76b32015-03-10 22:34:40 +02002646 rcu_read_lock();
2647 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg87c8b282015-03-11 08:55:51 +02002648 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2649 rcu_read_unlock();
Johan Hedberg55e76b32015-03-10 22:34:40 +02002650 return true;
Johan Hedberg87c8b282015-03-11 08:55:51 +02002651 }
Johan Hedberg55e76b32015-03-10 22:34:40 +02002652 }
2653 rcu_read_unlock();
2654
2655 return false;
2656}
2657
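/* Example (illustrative sketch, error handling simplified): a caller such
 * as the mgmt Unpair Device handler can bail out early for a device that
 * was never paired; the IRK lookup above makes the check work even when
 * only an RPA is known for the peer:
 *
 *	if (!hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
 *		err = -ENOENT;
 *		goto unlock;
 *	}
 */
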
Ville Tervo6bd32322011-02-16 16:32:41 +02002658/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002659static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002660{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002661 struct hci_dev *hdev = container_of(work, struct hci_dev,
2662 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002663
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002664 if (hdev->sent_cmd) {
2665 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2666 u16 opcode = __le16_to_cpu(sent->opcode);
2667
2668 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2669 } else {
2670 BT_ERR("%s command tx timeout", hdev->name);
2671 }
2672
Ville Tervo6bd32322011-02-16 16:32:41 +02002673 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002674 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002675}
2676
Szymon Janc2763eda2011-03-22 13:12:22 +01002677struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002678 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002679{
2680 struct oob_data *data;
2681
Johan Hedberg6928a922014-10-26 20:46:09 +01002682 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2683 if (bacmp(bdaddr, &data->bdaddr) != 0)
2684 continue;
2685 if (data->bdaddr_type != bdaddr_type)
2686 continue;
2687 return data;
2688 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002689
2690 return NULL;
2691}
2692
Johan Hedberg6928a922014-10-26 20:46:09 +01002693int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2694 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002695{
2696 struct oob_data *data;
2697
Johan Hedberg6928a922014-10-26 20:46:09 +01002698 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002699 if (!data)
2700 return -ENOENT;
2701
Johan Hedberg6928a922014-10-26 20:46:09 +01002702 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002703
2704 list_del(&data->list);
2705 kfree(data);
2706
2707 return 0;
2708}
2709
Johan Hedberg35f74982014-02-18 17:14:32 +02002710void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002711{
2712 struct oob_data *data, *n;
2713
2714 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2715 list_del(&data->list);
2716 kfree(data);
2717 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002718}
2719
Marcel Holtmann07988722014-01-10 02:07:29 -08002720int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002721 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d52014-10-26 20:33:47 +01002722 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01002723{
2724 struct oob_data *data;
2725
Johan Hedberg6928a922014-10-26 20:46:09 +01002726 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002727 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002728 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002729 if (!data)
2730 return -ENOMEM;
2731
2732 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01002733 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01002734 list_add(&data->list, &hdev->remote_oob_data);
2735 }
2736
Johan Hedberg81328d52014-10-26 20:33:47 +01002737 if (hash192 && rand192) {
2738 memcpy(data->hash192, hash192, sizeof(data->hash192));
2739 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002740 if (hash256 && rand256)
2741 data->present = 0x03;
Johan Hedberg81328d52014-10-26 20:33:47 +01002742 } else {
2743 memset(data->hash192, 0, sizeof(data->hash192));
2744 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002745 if (hash256 && rand256)
2746 data->present = 0x02;
2747 else
2748 data->present = 0x00;
Marcel Holtmann07988722014-01-10 02:07:29 -08002749 }
2750
Johan Hedberg81328d52014-10-26 20:33:47 +01002751 if (hash256 && rand256) {
2752 memcpy(data->hash256, hash256, sizeof(data->hash256));
2753 memcpy(data->rand256, rand256, sizeof(data->rand256));
2754 } else {
2755 memset(data->hash256, 0, sizeof(data->hash256));
2756 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002757 if (hash192 && rand192)
2758 data->present = 0x01;
Johan Hedberg81328d52014-10-26 20:33:47 +01002759 }
Marcel Holtmann07988722014-01-10 02:07:29 -08002760
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002761 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002762
2763 return 0;
2764}
2765
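/* Worked example (added for illustration): the data->present bitmask
 * records which OOB values the remote actually provided. A call with
 * P-256 values only, e.g.
 *
 *	hci_add_remote_oob_data(hdev, &bdaddr, BDADDR_LE_PUBLIC,
 *				NULL, NULL, hash256, rand256);
 *
 * leaves present == 0x02; P-192 only yields 0x01, both yield 0x03 and
 * neither yields 0x00.
 */
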
Florian Grandeld2609b32015-06-18 03:16:34 +02002766/* This function requires the caller holds hdev->lock */
2767struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2768{
2769 struct adv_info *adv_instance;
2770
2771 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2772 if (adv_instance->instance == instance)
2773 return adv_instance;
2774 }
2775
2776 return NULL;
2777}
2778
2779/* This function requires the caller holds hdev->lock */
2780struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2781 struct adv_info *cur_instance;
2782
2783 cur_instance = hci_find_adv_instance(hdev, instance);
2784 if (!cur_instance)
2785 return NULL;
2786
2787 if (cur_instance == list_last_entry(&hdev->adv_instances,
2788 struct adv_info, list))
2789 return list_first_entry(&hdev->adv_instances,
2790 struct adv_info, list);
2791 else
2792 return list_next_entry(cur_instance, list);
2793}
2794
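/* Example (illustrative): because the lookup wraps from the last list
 * entry back to the first, feeding the result back in cycles through all
 * registered instances, which is how advertising rotation is driven:
 *
 *	struct adv_info *adv = hci_get_next_instance(hdev, cur_instance);
 *
 *	if (adv)
 *		cur_instance = adv->instance;
 */
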
2795/* This function requires the caller holds hdev->lock */
2796int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2797{
2798 struct adv_info *adv_instance;
2799
2800 adv_instance = hci_find_adv_instance(hdev, instance);
2801 if (!adv_instance)
2802 return -ENOENT;
2803
2804 BT_DBG("%s removing %dMR", hdev->name, instance);
2805
Florian Grandel5d900e42015-06-18 03:16:35 +02002806 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2807 cancel_delayed_work(&hdev->adv_instance_expire);
2808 hdev->adv_instance_timeout = 0;
2809 }
2810
Florian Grandeld2609b32015-06-18 03:16:34 +02002811 list_del(&adv_instance->list);
2812 kfree(adv_instance);
2813
2814 hdev->adv_instance_cnt--;
2815
2816 return 0;
2817}
2818
2819/* This function requires the caller holds hdev->lock */
2820void hci_adv_instances_clear(struct hci_dev *hdev)
2821{
2822 struct adv_info *adv_instance, *n;
2823
Florian Grandel5d900e42015-06-18 03:16:35 +02002824 if (hdev->adv_instance_timeout) {
2825 cancel_delayed_work(&hdev->adv_instance_expire);
2826 hdev->adv_instance_timeout = 0;
2827 }
2828
Florian Grandeld2609b32015-06-18 03:16:34 +02002829 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2830 list_del(&adv_instance->list);
2831 kfree(adv_instance);
2832 }
2833
2834 hdev->adv_instance_cnt = 0;
2835}
2836
2837/* This function requires the caller holds hdev->lock */
2838int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2839 u16 adv_data_len, u8 *adv_data,
2840 u16 scan_rsp_len, u8 *scan_rsp_data,
2841 u16 timeout, u16 duration)
2842{
2843 struct adv_info *adv_instance;
2844
2845 adv_instance = hci_find_adv_instance(hdev, instance);
2846 if (adv_instance) {
2847 memset(adv_instance->adv_data, 0,
2848 sizeof(adv_instance->adv_data));
2849 memset(adv_instance->scan_rsp_data, 0,
2850 sizeof(adv_instance->scan_rsp_data));
2851 } else {
2852 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2853 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2854 return -EOVERFLOW;
2855
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002856 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002857 if (!adv_instance)
2858 return -ENOMEM;
2859
Florian Grandelfffd38b2015-06-18 03:16:47 +02002860 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002861 adv_instance->instance = instance;
2862 list_add(&adv_instance->list, &hdev->adv_instances);
2863 hdev->adv_instance_cnt++;
2864 }
2865
2866 adv_instance->flags = flags;
2867 adv_instance->adv_data_len = adv_data_len;
2868 adv_instance->scan_rsp_len = scan_rsp_len;
2869
2870 if (adv_data_len)
2871 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2872
2873 if (scan_rsp_len)
2874 memcpy(adv_instance->scan_rsp_data,
2875 scan_rsp_data, scan_rsp_len);
2876
2877 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002878 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002879
2880 if (duration == 0)
2881 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2882 else
2883 adv_instance->duration = duration;
2884
2885 BT_DBG("%s for %dMR", hdev->name, instance);
2886
2887 return 0;
2888}
2889
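/* Example (illustrative, hypothetical values): registering instance 1
 * with a Complete List of 16-bit Service UUIDs (0x180d, Heart Rate) as
 * advertising data, no scan response, no timeout and the default
 * duration:
 *
 *	u8 ad[] = { 0x03, 0x03, 0x0d, 0x18 };
 *
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad,
 *				   0, NULL, 0, 0);
 */
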
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002890struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002891 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002892{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002893 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002894
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002895 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002896 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002897 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002898 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002899
2900 return NULL;
2901}
2902
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002903void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002904{
2905 struct list_head *p, *n;
2906
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002907 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002908 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002909
2910 list_del(p);
2911 kfree(b);
2912 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002913}
2914
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002915int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002916{
2917 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002918
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002919 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002920 return -EBADF;
2921
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002922 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002923 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002924
Johan Hedberg27f70f32014-07-21 10:50:06 +03002925 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002926 if (!entry)
2927 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002928
2929 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002930 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002931
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002932 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002933
2934 return 0;
2935}
2936
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002937int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002938{
2939 struct bdaddr_list *entry;
2940
Johan Hedberg35f74982014-02-18 17:14:32 +02002941 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002942 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002943 return 0;
2944 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002945
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002946 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002947 if (!entry)
2948 return -ENOENT;
2949
2950 list_del(&entry->list);
2951 kfree(entry);
2952
2953 return 0;
2954}
2955
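/* Example (illustrative): the same three helpers back every address list
 * in the stack (blacklist, whitelist, LE white list). A typical
 * add/lookup/remove cycle:
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (!err && hci_bdaddr_list_lookup(&hdev->whitelist, &bdaddr,
 *					   BDADDR_BREDR))
 *		err = hci_bdaddr_list_del(&hdev->whitelist, &bdaddr,
 *					  BDADDR_BREDR);
 *
 * Note that passing BDADDR_ANY to hci_bdaddr_list_del() clears the whole
 * list instead of removing a single entry.
 */
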
Andre Guedes15819a72014-02-03 13:56:18 -03002956/* This function requires the caller holds hdev->lock */
2957struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2958 bdaddr_t *addr, u8 addr_type)
2959{
2960 struct hci_conn_params *params;
2961
2962 list_for_each_entry(params, &hdev->le_conn_params, list) {
2963 if (bacmp(&params->addr, addr) == 0 &&
2964 params->addr_type == addr_type) {
2965 return params;
2966 }
2967 }
2968
2969 return NULL;
2970}
2971
2972/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002973struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2974 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002975{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002976 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002977
Johan Hedberg501f8822014-07-04 12:37:26 +03002978 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002979 if (bacmp(&param->addr, addr) == 0 &&
2980 param->addr_type == addr_type)
2981 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002982 }
2983
2984 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002985}
2986
2987/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002988struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2989 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002990{
2991 struct hci_conn_params *params;
2992
2993 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002994 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002995 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002996
2997 params = kzalloc(sizeof(*params), GFP_KERNEL);
2998 if (!params) {
2999 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003000 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003001 }
3002
3003 bacpy(&params->addr, addr);
3004 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003005
3006 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003007 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003008
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003009 params->conn_min_interval = hdev->le_conn_min_interval;
3010 params->conn_max_interval = hdev->le_conn_max_interval;
3011 params->conn_latency = hdev->le_conn_latency;
3012 params->supervision_timeout = hdev->le_supv_timeout;
3013 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3014
3015 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3016
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003017 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003018}
3019
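/* Example (illustrative sketch): after adding parameters, callers such as
 * the mgmt Add Device path set the auto-connect policy and hook the entry
 * onto the matching action list, e.g. for background reconnection:
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params) {
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *		list_add(&params->action, &hdev->pend_le_conns);
 *	}
 *
 * (Simplified: the real path also detaches the entry from any previous
 * action list and re-triggers the background scan.)
 */
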
Johan Hedbergf6c63242014-08-15 21:06:59 +03003020static void hci_conn_params_free(struct hci_conn_params *params)
3021{
3022 if (params->conn) {
3023 hci_conn_drop(params->conn);
3024 hci_conn_put(params->conn);
3025 }
3026
3027 list_del(&params->action);
3028 list_del(&params->list);
3029 kfree(params);
3030}
3031
Andre Guedes15819a72014-02-03 13:56:18 -03003032/* This function requires the caller holds hdev->lock */
3033void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3034{
3035 struct hci_conn_params *params;
3036
3037 params = hci_conn_params_lookup(hdev, addr, addr_type);
3038 if (!params)
3039 return;
3040
Johan Hedbergf6c63242014-08-15 21:06:59 +03003041 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003042
Johan Hedberg95305ba2014-07-04 12:37:21 +03003043 hci_update_background_scan(hdev);
3044
Andre Guedes15819a72014-02-03 13:56:18 -03003045 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3046}
3047
3048/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003049void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003050{
3051 struct hci_conn_params *params, *tmp;
3052
3053 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03003054 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3055 continue;
Jakub Pawlowskif75113a2015-08-07 20:22:53 +02003056
3057 /* If trying to establish a one-time connection to a disabled
3058 * device, leave the params, but mark them for explicit connect.
3059 */
3060 if (params->explicit_connect) {
3061 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3062 continue;
3063 }
3064
Andre Guedes15819a72014-02-03 13:56:18 -03003065 list_del(&params->list);
3066 kfree(params);
3067 }
3068
Johan Hedberg55af49a2014-07-02 17:37:26 +03003069 BT_DBG("All LE disabled connection parameters were removed");
3070}
3071
3072/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003073void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003074{
3075 struct hci_conn_params *params, *tmp;
3076
Johan Hedbergf6c63242014-08-15 21:06:59 +03003077 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3078 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003079
Johan Hedberga2f41a82014-07-04 12:37:19 +03003080 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003081
Andre Guedes15819a72014-02-03 13:56:18 -03003082 BT_DBG("All LE connection parameters were removed");
3083}
3084
Marcel Holtmann1904a852015-01-11 13:50:44 -08003085static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003086{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003087 if (status) {
3088 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003089
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003090 hci_dev_lock(hdev);
3091 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3092 hci_dev_unlock(hdev);
3093 return;
3094 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003095}
3096
Marcel Holtmann1904a852015-01-11 13:50:44 -08003097static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3098 u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003099{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003100 /* General inquiry access code (GIAC) */
3101 u8 lap[3] = { 0x33, 0x8b, 0x9e };
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003102 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003103 int err;
3104
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003105 if (status) {
3106 BT_ERR("Failed to disable LE scanning: status %d", status);
3107 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003108 }
3109
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003110 hdev->discovery.scan_start = 0;
3111
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003112 switch (hdev->discovery.type) {
3113 case DISCOV_TYPE_LE:
3114 hci_dev_lock(hdev);
3115 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3116 hci_dev_unlock(hdev);
3117 break;
3118
3119 case DISCOV_TYPE_INTERLEAVED:
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003120 hci_dev_lock(hdev);
3121
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003122 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3123 &hdev->quirks)) {
3124 /* If we were running an LE-only scan, change the discovery
3125 * state. If we were running both LE and BR/EDR inquiry
3126 * simultaneously and the BR/EDR inquiry has already
3127 * finished, stop discovery; otherwise the BR/EDR inquiry
Wesley Kuo177d0502015-05-13 10:33:15 +08003128 * will stop discovery when it finishes. If we are resolving
3129 * a remote device name, do not change the discovery state.
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003130 */
Wesley Kuo177d0502015-05-13 10:33:15 +08003131 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3132 hdev->discovery.state != DISCOVERY_RESOLVING)
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003133 hci_discovery_set_state(hdev,
3134 DISCOVERY_STOPPED);
3135 } else {
Johan Hedbergbaf880a2015-03-21 08:02:23 +02003136 struct hci_request req;
3137
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003138 hci_inquiry_cache_flush(hdev);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003139
Johan Hedbergbaf880a2015-03-21 08:02:23 +02003140 hci_req_init(&req, hdev);
3141
3142 memset(&cp, 0, sizeof(cp));
3143 memcpy(&cp.lap, lap, sizeof(cp.lap));
3144 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3145 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3146
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003147 err = hci_req_run(&req, inquiry_complete);
3148 if (err) {
3149 BT_ERR("Inquiry request failed: err %d", err);
3150 hci_discovery_set_state(hdev,
3151 DISCOVERY_STOPPED);
3152 }
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003153 }
3154
3155 hci_dev_unlock(hdev);
3156 break;
3157 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003158}
3159
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003160static void le_scan_disable_work(struct work_struct *work)
3161{
3162 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003163 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003164 struct hci_request req;
3165 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003166
3167 BT_DBG("%s", hdev->name);
3168
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003169 cancel_delayed_work_sync(&hdev->le_scan_restart);
3170
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003171 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003172
Andre Guedesb1efcc22014-02-26 20:21:40 -03003173 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003174
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003175 err = hci_req_run(&req, le_scan_disable_work_complete);
3176 if (err)
3177 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003178}
3179
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003180static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3181 u16 opcode)
3182{
3183 unsigned long timeout, duration, scan_start, now;
3184
3185 BT_DBG("%s", hdev->name);
3186
3187 if (status) {
3188 BT_ERR("Failed to restart LE scan: status %d", status);
3189 return;
3190 }
3191
3192 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3193 !hdev->discovery.scan_start)
3194 return;
3195
3196 /* When the scan was started, hdev->le_scan_disable was queued to
3197 * run 'duration' jiffies after scan_start. The restart canceled
3198 * that job, so we need to queue it again with the remaining
3199 * timeout to make sure the scan does not run indefinitely.
3200 */
3201 duration = hdev->discovery.scan_duration;
3202 scan_start = hdev->discovery.scan_start;
3203 now = jiffies;
3204 if (now - scan_start <= duration) {
3205 int elapsed;
3206
3207 if (now >= scan_start)
3208 elapsed = now - scan_start;
3209 else
3210 elapsed = ULONG_MAX - scan_start + now;
3211
3212 timeout = duration - elapsed;
3213 } else {
3214 timeout = 0;
3215 }
3216 queue_delayed_work(hdev->workqueue,
3217 &hdev->le_scan_disable, timeout);
3218}
3219
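/* Worked example (added for illustration): with a scan_duration of 10240
 * jiffies and a restart happening 4000 jiffies after scan_start, elapsed
 * is 4000 and the disable work is re-queued with timeout = 10240 - 4000 =
 * 6240 jiffies, so the total scan time still matches the configured
 * duration. The ULONG_MAX - scan_start + now branch keeps elapsed correct
 * if the jiffies counter wrapped between scan_start and now.
 */
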
3220static void le_scan_restart_work(struct work_struct *work)
3221{
3222 struct hci_dev *hdev = container_of(work, struct hci_dev,
3223 le_scan_restart.work);
3224 struct hci_request req;
3225 struct hci_cp_le_set_scan_enable cp;
3226 int err;
3227
3228 BT_DBG("%s", hdev->name);
3229
3230 /* If controller is not scanning we are done. */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003231 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003232 return;
3233
3234 hci_req_init(&req, hdev);
3235
3236 hci_req_add_le_scan_disable(&req);
3237
3238 memset(&cp, 0, sizeof(cp));
3239 cp.enable = LE_SCAN_ENABLE;
3240 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3241 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3242
3243 err = hci_req_run(&req, le_scan_restart_work_complete);
3244 if (err)
3245 BT_ERR("Restart LE scan request failed: err %d", err);
3246}
3247
Johan Hedberga1f4c312014-02-27 14:05:41 +02003248/* Copy the Identity Address of the controller.
3249 *
3250 * If the controller has a public BD_ADDR, then by default use that one.
3251 * If this is a LE only controller without a public address, default to
3252 * the static random address.
3253 *
3254 * For debugging purposes it is possible to force controllers with a
3255 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003256 *
3257 * In case BR/EDR has been disabled on a dual-mode controller and
3258 * userspace has configured a static address, then that address
3259 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02003260 */
3261void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3262 u8 *bdaddr_type)
3263{
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07003264 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003265 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003266 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003267 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02003268 bacpy(bdaddr, &hdev->static_addr);
3269 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3270 } else {
3271 bacpy(bdaddr, &hdev->bdaddr);
3272 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3273 }
3274}
3275
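/* Example (illustrative): advertising and connection setup code use this
 * helper to fill in own-address information:
 *
 *	bdaddr_t bdaddr;
 *	u8 bdaddr_type;
 *
 *	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
 *
 * after which bdaddr/bdaddr_type hold either the public address or the
 * configured static random address, following the rules above.
 */
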
David Herrmann9be0dab2012-04-22 14:39:57 +02003276/* Alloc HCI device */
3277struct hci_dev *hci_alloc_dev(void)
3278{
3279 struct hci_dev *hdev;
3280
Johan Hedberg27f70f32014-07-21 10:50:06 +03003281 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003282 if (!hdev)
3283 return NULL;
3284
David Herrmannb1b813d2012-04-22 14:39:58 +02003285 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3286 hdev->esco_type = (ESCO_HV1);
3287 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003288 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3289 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003290 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003291 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3292 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
Florian Grandeld2609b32015-06-18 03:16:34 +02003293 hdev->adv_instance_cnt = 0;
3294 hdev->cur_adv_instance = 0x00;
Florian Grandel5d900e42015-06-18 03:16:35 +02003295 hdev->adv_instance_timeout = 0;
David Herrmannb1b813d2012-04-22 14:39:58 +02003296
David Herrmannb1b813d2012-04-22 14:39:58 +02003297 hdev->sniff_max_interval = 800;
3298 hdev->sniff_min_interval = 80;
3299
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003300 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02003301 hdev->le_adv_min_interval = 0x0800;
3302 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003303 hdev->le_scan_interval = 0x0060;
3304 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003305 hdev->le_conn_min_interval = 0x0028;
3306 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003307 hdev->le_conn_latency = 0x0000;
3308 hdev->le_supv_timeout = 0x002a;
Marcel Holtmanna8e1bfa2014-12-20 16:28:40 +01003309 hdev->le_def_tx_len = 0x001b;
3310 hdev->le_def_tx_time = 0x0148;
3311 hdev->le_max_tx_len = 0x001b;
3312 hdev->le_max_tx_time = 0x0148;
3313 hdev->le_max_rx_len = 0x001b;
3314 hdev->le_max_rx_time = 0x0148;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003315
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003316 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003317 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003318 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3319 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003320
David Herrmannb1b813d2012-04-22 14:39:58 +02003321 mutex_init(&hdev->lock);
3322 mutex_init(&hdev->req_lock);
3323
3324 INIT_LIST_HEAD(&hdev->mgmt_pending);
3325 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003326 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003327 INIT_LIST_HEAD(&hdev->uuids);
3328 INIT_LIST_HEAD(&hdev->link_keys);
3329 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003330 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003331 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003332 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003333 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003334 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003335 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003336 INIT_LIST_HEAD(&hdev->conn_hash.list);
Florian Grandeld2609b32015-06-18 03:16:34 +02003337 INIT_LIST_HEAD(&hdev->adv_instances);
David Herrmannb1b813d2012-04-22 14:39:58 +02003338
3339 INIT_WORK(&hdev->rx_work, hci_rx_work);
3340 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3341 INIT_WORK(&hdev->tx_work, hci_tx_work);
3342 INIT_WORK(&hdev->power_on, hci_power_on);
Marcel Holtmannc7741d12015-01-28 11:09:55 -08003343 INIT_WORK(&hdev->error_reset, hci_error_reset);
David Herrmannb1b813d2012-04-22 14:39:58 +02003344
David Herrmannb1b813d2012-04-22 14:39:58 +02003345 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3346 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3347 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003348 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Florian Grandel5d900e42015-06-18 03:16:35 +02003349 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
David Herrmannb1b813d2012-04-22 14:39:58 +02003350
David Herrmannb1b813d2012-04-22 14:39:58 +02003351 skb_queue_head_init(&hdev->rx_q);
3352 skb_queue_head_init(&hdev->cmd_q);
3353 skb_queue_head_init(&hdev->raw_q);
3354
3355 init_waitqueue_head(&hdev->req_wait_q);
3356
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003357 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003358
David Herrmannb1b813d2012-04-22 14:39:58 +02003359 hci_init_sysfs(hdev);
3360 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003361
3362 return hdev;
3363}
3364EXPORT_SYMBOL(hci_alloc_dev);
3365
3366/* Free HCI device */
3367void hci_free_dev(struct hci_dev *hdev)
3368{
David Herrmann9be0dab2012-04-22 14:39:57 +02003369 /* will free via device release */
3370 put_device(&hdev->dev);
3371}
3372EXPORT_SYMBOL(hci_free_dev);
3373
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374/* Register HCI device */
3375int hci_register_dev(struct hci_dev *hdev)
3376{
David Herrmannb1b813d2012-04-22 14:39:58 +02003377 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378
Marcel Holtmann74292d52014-07-06 15:50:27 +02003379 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380 return -EINVAL;
3381
Mat Martineau08add512011-11-02 16:18:36 -07003382 /* Do not allow HCI_AMP devices to register at index 0,
3383 * so the index can be used as the AMP controller ID.
3384 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003385 switch (hdev->dev_type) {
3386 case HCI_BREDR:
3387 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3388 break;
3389 case HCI_AMP:
3390 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3391 break;
3392 default:
3393 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003395
Sasha Levin3df92b32012-05-27 22:36:56 +02003396 if (id < 0)
3397 return id;
3398
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399 sprintf(hdev->name, "hci%d", id);
3400 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003401
3402 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3403
Kees Cookd8537542013-07-03 15:04:57 -07003404 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3405 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003406 if (!hdev->workqueue) {
3407 error = -ENOMEM;
3408 goto err;
3409 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003410
Kees Cookd8537542013-07-03 15:04:57 -07003411 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3412 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003413 if (!hdev->req_workqueue) {
3414 destroy_workqueue(hdev->workqueue);
3415 error = -ENOMEM;
3416 goto err;
3417 }
3418
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003419 if (!IS_ERR_OR_NULL(bt_debugfs))
3420 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3421
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003422 dev_set_name(&hdev->dev, "%s", hdev->name);
3423
3424 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003425 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003426 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003428 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003429 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3430 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003431 if (hdev->rfkill) {
3432 if (rfkill_register(hdev->rfkill) < 0) {
3433 rfkill_destroy(hdev->rfkill);
3434 hdev->rfkill = NULL;
3435 }
3436 }
3437
Johan Hedberg5e130362013-09-13 08:58:17 +03003438 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003439 hci_dev_set_flag(hdev, HCI_RFKILLED);
Johan Hedberg5e130362013-09-13 08:58:17 +03003440
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003441 hci_dev_set_flag(hdev, HCI_SETUP);
3442 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003443
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003444 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003445 /* Assume BR/EDR support until proven otherwise (such as
3446 * through reading supported features during init.
3447 */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003448 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg56f87902013-10-02 13:43:13 +03003449 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003450
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003451 write_lock(&hci_dev_list_lock);
3452 list_add(&hdev->list, &hci_dev_list);
3453 write_unlock(&hci_dev_list_lock);
3454
Marcel Holtmann4a964402014-07-02 19:10:33 +02003455 /* Devices that are marked for raw-only usage are unconfigured
3456 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003457 */
3458 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003459 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003460
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003461 hci_sock_dev_event(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003462 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463
Johan Hedberg19202572013-01-14 22:33:51 +02003464 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003465
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003467
David Herrmann33ca9542011-10-08 14:58:49 +02003468err_wqueue:
3469 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003470 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003471err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003472 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003473
David Herrmann33ca9542011-10-08 14:58:49 +02003474 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475}
3476EXPORT_SYMBOL(hci_register_dev);
3477
3478/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003479void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480{
Marcel Holtmann2d7cc192015-04-04 21:59:27 -07003481 int id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003482
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003483 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003485 hci_dev_set_flag(hdev, HCI_UNREGISTER);
Johan Hovold94324962012-03-15 14:48:41 +01003486
Sasha Levin3df92b32012-05-27 22:36:56 +02003487 id = hdev->id;
3488
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003489 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003491 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492
3493 hci_dev_do_close(hdev);
3494
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003495 cancel_work_sync(&hdev->power_on);
3496
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003497 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003498 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3499 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003500 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003501 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003502 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003503 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003504
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003505 /* mgmt_index_removed should take care of emptying the
3506 * pending list */
3507 BUG_ON(!list_empty(&hdev->mgmt_pending));
3508
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003509 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003511 if (hdev->rfkill) {
3512 rfkill_unregister(hdev->rfkill);
3513 rfkill_destroy(hdev->rfkill);
3514 }
3515
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003516 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003517
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003518 debugfs_remove_recursive(hdev->debugfs);
3519
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003520 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003521 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003522
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003523 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003524 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003525 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003526 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003527 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003528 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003529 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003530 hci_remote_oob_data_clear(hdev);
Florian Grandeld2609b32015-06-18 03:16:34 +02003531 hci_adv_instances_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003532 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003533 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003534 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003535 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003536
David Herrmanndc946bd2012-01-07 15:47:24 +01003537 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003538
3539 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540}
3541EXPORT_SYMBOL(hci_unregister_dev);
3542
3543/* Suspend HCI device */
3544int hci_suspend_dev(struct hci_dev *hdev)
3545{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003546 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547 return 0;
3548}
3549EXPORT_SYMBOL(hci_suspend_dev);
3550
3551/* Resume HCI device */
3552int hci_resume_dev(struct hci_dev *hdev)
3553{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003554 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 return 0;
3556}
3557EXPORT_SYMBOL(hci_resume_dev);
3558
Marcel Holtmann75e05692014-11-02 08:15:38 +01003559/* Reset HCI device */
3560int hci_reset_dev(struct hci_dev *hdev)
3561{
3562 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3563 struct sk_buff *skb;
3564
3565 skb = bt_skb_alloc(3, GFP_ATOMIC);
3566 if (!skb)
3567 return -ENOMEM;
3568
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003569 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
Marcel Holtmann75e05692014-11-02 08:15:38 +01003570 memcpy(skb_put(skb, 3), hw_err, 3);
3571
3572 /* Send Hardware Error to upper stack */
3573 return hci_recv_frame(hdev, skb);
3574}
3575EXPORT_SYMBOL(hci_reset_dev);
3576
Marcel Holtmann76bca882009-11-18 00:40:39 +01003577/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003578int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003579{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003580 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003581 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003582 kfree_skb(skb);
3583 return -ENXIO;
3584 }
3585
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003586 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3587 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3588 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
Marcel Holtmannfe806dc2015-10-08 03:14:28 +02003589 kfree_skb(skb);
3590 return -EINVAL;
3591 }
3592
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003593 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003594 bt_cb(skb)->incoming = 1;
3595
3596 /* Time stamp */
3597 __net_timestamp(skb);
3598
Marcel Holtmann76bca882009-11-18 00:40:39 +01003599 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003600 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003601
Marcel Holtmann76bca882009-11-18 00:40:39 +01003602 return 0;
3603}
3604EXPORT_SYMBOL(hci_recv_frame);
3605
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003606/* Receive diagnostic message from HCI drivers */
3607int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3608{
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003609 /* Mark as diagnostic packet */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003610 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003611
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003612 /* Time stamp */
3613 __net_timestamp(skb);
3614
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003615 skb_queue_tail(&hdev->rx_q, skb);
3616 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003617
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003618 return 0;
3619}
3620EXPORT_SYMBOL(hci_recv_diag);
3621
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622/* ---- Interface to upper protocols ---- */
3623
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624int hci_register_cb(struct hci_cb *cb)
3625{
3626 BT_DBG("%p name %s", cb, cb->name);
3627
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003628 mutex_lock(&hci_cb_list_lock);
Johan Hedberg00629e02015-02-18 14:53:54 +02003629 list_add_tail(&cb->list, &hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003630 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631
3632 return 0;
3633}
3634EXPORT_SYMBOL(hci_register_cb);
3635
3636int hci_unregister_cb(struct hci_cb *cb)
3637{
3638 BT_DBG("%p name %s", cb, cb->name);
3639
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003640 mutex_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 list_del(&cb->list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003642 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643
3644 return 0;
3645}
3646EXPORT_SYMBOL(hci_unregister_cb);
3647
Marcel Holtmann51086992013-10-10 14:54:19 -07003648static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003649{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003650 int err;
3651
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003652 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3653 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003655 /* Time stamp */
3656 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003658 /* Send copy to monitor */
3659 hci_send_to_monitor(hdev, skb);
3660
3661 if (atomic_read(&hdev->promisc)) {
3662 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003663 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 }
3665
3666 /* Get rid of the skb owner prior to sending to the driver. */
3667 skb_orphan(skb);
3668
Marcel Holtmann73d0d3c2015-10-04 23:34:01 +02003669 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3670 kfree_skb(skb);
3671 return;
3672 }
3673
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003674 err = hdev->send(hdev, skb);
3675 if (err < 0) {
3676 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3677 kfree_skb(skb);
3678 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679}
3680
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003681/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003682int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3683 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003684{
3685 struct sk_buff *skb;
3686
3687 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3688
3689 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3690 if (!skb) {
3691 BT_ERR("%s no memory for command", hdev->name);
3692 return -ENOMEM;
3693 }
3694
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003695 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003696 * single-command requests.
3697 */
Johan Hedberg44d27132015-11-05 09:31:40 +02003698 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
Johan Hedberg11714b32013-03-05 20:37:47 +02003699
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003701 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702
3703 return 0;
3704}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705
3706/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003707void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708{
3709 struct hci_command_hdr *hdr;
3710
3711 if (!hdev->sent_cmd)
3712 return NULL;
3713
3714 hdr = (void *) hdev->sent_cmd->data;
3715
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003716 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 return NULL;
3718
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003719 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720
3721 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3722}
3723
Loic Poulainfbef1682015-09-29 15:05:44 +02003724/* Send HCI command and wait for command complete event */
3725struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3726 const void *param, u32 timeout)
3727{
3728 struct sk_buff *skb;
3729
3730 if (!test_bit(HCI_UP, &hdev->flags))
3731 return ERR_PTR(-ENETDOWN);
3732
3733 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3734
3735 hci_req_lock(hdev);
3736 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3737 hci_req_unlock(hdev);
3738
3739 return skb;
3740}
3741EXPORT_SYMBOL(hci_cmd_sync);
3742
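/* Example (illustrative sketch): a driver reading the local version
 * information synchronously. The return value is either a valid skb or an
 * ERR_PTR(), and a valid skb must be freed by the caller:
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */
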
Linus Torvalds1da177e2005-04-16 15:20:36 -07003743/* Send ACL data */
3744static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3745{
3746 struct hci_acl_hdr *hdr;
3747 int len = skb->len;
3748
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003749 skb_push(skb, HCI_ACL_HDR_SIZE);
3750 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003751 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003752 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3753 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003754}
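
/* Worked example for the packing above: hci_handle_pack() keeps the low
 * 12 bits for the connection handle and folds the packet boundary and
 * broadcast flags into the top nibble, so handle 0x002a sent with
 * ACL_START (0x02) becomes 0x202a before the little-endian conversion.
 */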
3755
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003756static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003757 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003759 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760 struct hci_dev *hdev = conn->hdev;
3761 struct sk_buff *list;
3762
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003763 skb->len = skb_headlen(skb);
3764 skb->data_len = 0;
3765
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003766 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003767
3768 switch (hdev->dev_type) {
3769 case HCI_BREDR:
3770 hci_add_acl_hdr(skb, conn->handle, flags);
3771 break;
3772 case HCI_AMP:
3773 hci_add_acl_hdr(skb, chan->handle, flags);
3774 break;
3775 default:
3776 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3777 return;
3778 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003779
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003780 list = skb_shinfo(skb)->frag_list;
3781 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782		/* Non-fragmented */
3783 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3784
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003785 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786 } else {
3787 /* Fragmented */
3788 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3789
3790 skb_shinfo(skb)->frag_list = NULL;
3791
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003792		/* Queue all fragments atomically. We need to use spin_lock_bh
3793		 * here because with 6LoWPAN links this function can be called
3794		 * from softirq context, and taking a plain spin lock there
3795		 * could deadlock.
3796		 */
3797 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003799 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003800
3801 flags &= ~ACL_START;
3802 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803 do {
3804			skb = list;
			list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003805
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003806 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003807 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808
3809 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3810
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003811 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812 } while (list);
3813
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003814 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003816}
3817
3818void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3819{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003820 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003821
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003822 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003823
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003824 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003826 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003827}
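
/* Usage note (sketch): the expected caller here is L2CAP, handing over
 * a complete PDU whose first fragment carries ACL_START; any further
 * fragments hang off skb_shinfo(skb)->frag_list and are re-flagged
 * ACL_CONT by hci_queue_acl() before the TX work runs.
 */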
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828
3829/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003830void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831{
3832 struct hci_dev *hdev = conn->hdev;
3833 struct hci_sco_hdr hdr;
3834
3835 BT_DBG("%s len %d", hdev->name, skb->len);
3836
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003837 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 hdr.dlen = skb->len;
3839
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003840 skb_push(skb, HCI_SCO_HDR_SIZE);
3841 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003842 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003843
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01003844 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003845
Linus Torvalds1da177e2005-04-16 15:20:36 -07003846 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003847 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848}
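
/* Usage note (sketch): there is no fragmentation path for SCO, so the
 * caller (the SCO socket layer) must size each skb to the negotiated
 * MTU; only the 3-byte SCO header is prepended here.
 */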
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849
3850/* ---- HCI TX task (outgoing data) ---- */
3851
3852/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003853static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3854 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855{
3856 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003857 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003858 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003860 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003862
3863 rcu_read_lock();
3864
3865 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003866 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003868
3869 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3870 continue;
3871
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872 num++;
3873
3874 if (c->sent < min) {
3875 min = c->sent;
3876 conn = c;
3877 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003878
3879 if (hci_conn_num(hdev, type) == num)
3880 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 }
3882
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003883 rcu_read_unlock();
3884
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003886 int cnt, q;
3887
3888 switch (conn->type) {
3889 case ACL_LINK:
3890 cnt = hdev->acl_cnt;
3891 break;
3892 case SCO_LINK:
3893 case ESCO_LINK:
3894 cnt = hdev->sco_cnt;
3895 break;
3896 case LE_LINK:
3897 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3898 break;
3899 default:
3900 cnt = 0;
3901 BT_ERR("Unknown link type");
3902 }
3903
3904 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905 *quote = q ? q : 1;
3906 } else
3907 *quote = 0;
3908
3909 BT_DBG("conn %p quote %d", conn, *quote);
3910 return conn;
3911}
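
/* Worked example of the quote computation above (figures illustrative):
 * with hdev->acl_cnt == 8 free slots and three busy ACL connections,
 * the least-used connection wins and may send 8 / 3 = 2 packets before
 * the scheduler looks again; the quote is clamped to at least 1 so a
 * selected connection always makes progress.
 */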
3912
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003913static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003914{
3915 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003916 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917
Ville Tervobae1f5d92011-02-10 22:38:53 -03003918 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003919
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003920 rcu_read_lock();
3921
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003923 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003924 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003925 BT_ERR("%s killing stalled connection %pMR",
3926 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003927 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 }
3929 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003930
3931 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932}
3933
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003934static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3935 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003936{
3937 struct hci_conn_hash *h = &hdev->conn_hash;
3938 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003939 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003940 struct hci_conn *conn;
3941 int cnt, q, conn_num = 0;
3942
3943 BT_DBG("%s", hdev->name);
3944
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003945 rcu_read_lock();
3946
3947 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003948 struct hci_chan *tmp;
3949
3950 if (conn->type != type)
3951 continue;
3952
3953 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3954 continue;
3955
3956 conn_num++;
3957
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003958 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003959 struct sk_buff *skb;
3960
3961 if (skb_queue_empty(&tmp->data_q))
3962 continue;
3963
3964 skb = skb_peek(&tmp->data_q);
3965 if (skb->priority < cur_prio)
3966 continue;
3967
3968 if (skb->priority > cur_prio) {
3969 num = 0;
3970 min = ~0;
3971 cur_prio = skb->priority;
3972 }
3973
3974 num++;
3975
3976 if (conn->sent < min) {
3977 min = conn->sent;
3978 chan = tmp;
3979 }
3980 }
3981
3982 if (hci_conn_num(hdev, type) == conn_num)
3983 break;
3984 }
3985
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003986 rcu_read_unlock();
3987
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003988 if (!chan)
3989 return NULL;
3990
3991 switch (chan->conn->type) {
3992 case ACL_LINK:
3993 cnt = hdev->acl_cnt;
3994 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003995 case AMP_LINK:
3996 cnt = hdev->block_cnt;
3997 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003998 case SCO_LINK:
3999 case ESCO_LINK:
4000 cnt = hdev->sco_cnt;
4001 break;
4002 case LE_LINK:
4003 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4004 break;
4005 default:
4006 cnt = 0;
4007 BT_ERR("Unknown link type");
4008 }
4009
4010 q = cnt / num;
4011 *quote = q ? q : 1;
4012 BT_DBG("chan %p quote %d", chan, *quote);
4013 return chan;
4014}
4015
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004016static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4017{
4018 struct hci_conn_hash *h = &hdev->conn_hash;
4019 struct hci_conn *conn;
4020 int num = 0;
4021
4022 BT_DBG("%s", hdev->name);
4023
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004024 rcu_read_lock();
4025
4026 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004027 struct hci_chan *chan;
4028
4029 if (conn->type != type)
4030 continue;
4031
4032 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4033 continue;
4034
4035 num++;
4036
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004037 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004038 struct sk_buff *skb;
4039
4040 if (chan->sent) {
4041 chan->sent = 0;
4042 continue;
4043 }
4044
4045 if (skb_queue_empty(&chan->data_q))
4046 continue;
4047
4048 skb = skb_peek(&chan->data_q);
4049 if (skb->priority >= HCI_PRIO_MAX - 1)
4050 continue;
4051
4052 skb->priority = HCI_PRIO_MAX - 1;
4053
4054 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004055 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004056 }
4057
4058 if (hci_conn_num(hdev, type) == num)
4059 break;
4060 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004061
4062 rcu_read_unlock();
4063
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004064}
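
/* Usage note (sketch): after a scheduling round that actually sent
 * data, any channel that sent nothing gets its head skb promoted to
 * HCI_PRIO_MAX - 1, so lower-priority traffic cannot starve forever.
 */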
4065
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004066static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4067{
4068 /* Calculate count of blocks used by this packet */
4069 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4070}
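
/* Worked example (illustrative figures): skb->len still includes the
 * 4-byte ACL header at this point, so a 500-byte payload on a
 * controller reporting block_len == 216 costs
 * DIV_ROUND_UP(500, 216) == 3 data blocks.
 */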
4071
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004072static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004074 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004075 /* ACL tx timeout must be longer than maximum
4076 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004077 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004078 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004079 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004081}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004082
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004083static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004084{
4085 unsigned int cnt = hdev->acl_cnt;
4086 struct hci_chan *chan;
4087 struct sk_buff *skb;
4088 int quote;
4089
4090 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004091
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004092 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004093 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004094 u32 priority = (skb_peek(&chan->data_q))->priority;
4095 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004096 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004097 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004098
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004099 /* Stop if priority has changed */
4100 if (skb->priority < priority)
4101 break;
4102
4103 skb = skb_dequeue(&chan->data_q);
4104
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004105 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004106 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004107
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004108 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109 hdev->acl_last_tx = jiffies;
4110
4111 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004112 chan->sent++;
4113 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114 }
4115 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004116
4117 if (cnt != hdev->acl_cnt)
4118 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004119}
4120
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004121static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004122{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004123 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004124 struct hci_chan *chan;
4125 struct sk_buff *skb;
4126 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004127 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004128
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004129 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004130
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004131 BT_DBG("%s", hdev->name);
4132
4133 if (hdev->dev_type == HCI_AMP)
4134 type = AMP_LINK;
4135 else
4136 type = ACL_LINK;
4137
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004138 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004139 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004140 u32 priority = (skb_peek(&chan->data_q))->priority;
4141 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4142 int blocks;
4143
4144 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004145 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004146
4147 /* Stop if priority has changed */
4148 if (skb->priority < priority)
4149 break;
4150
4151 skb = skb_dequeue(&chan->data_q);
4152
4153 blocks = __get_blocks(hdev, skb);
4154 if (blocks > hdev->block_cnt)
4155 return;
4156
4157 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004158 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004159
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004160 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004161 hdev->acl_last_tx = jiffies;
4162
4163 hdev->block_cnt -= blocks;
4164 quote -= blocks;
4165
4166 chan->sent += blocks;
4167 chan->conn->sent += blocks;
4168 }
4169 }
4170
4171 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004172 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004173}
4174
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004175static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004176{
4177 BT_DBG("%s", hdev->name);
4178
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004179 /* No ACL link over BR/EDR controller */
4180 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4181 return;
4182
4183 /* No AMP link over AMP controller */
4184 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004185 return;
4186
4187 switch (hdev->flow_ctl_mode) {
4188 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4189 hci_sched_acl_pkt(hdev);
4190 break;
4191
4192 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4193 hci_sched_acl_blk(hdev);
4194 break;
4195 }
4196}
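
/* Sketch of the split above: packet-based flow control (the BR/EDR
 * default) budgets whole ACL packets through acl_cnt, while block-based
 * mode (used by AMP controllers) budgets fixed-size buffer blocks
 * through block_cnt, hence the two separate schedulers.
 */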
4197
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004199static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200{
4201 struct hci_conn *conn;
4202 struct sk_buff *skb;
4203 int quote;
4204
4205 BT_DBG("%s", hdev->name);
4206
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004207 if (!hci_conn_num(hdev, SCO_LINK))
4208 return;
4209
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4211 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4212 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004213 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214
4215 conn->sent++;
4216 if (conn->sent == ~0)
4217 conn->sent = 0;
4218 }
4219 }
4220}
4221
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004222static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004223{
4224 struct hci_conn *conn;
4225 struct sk_buff *skb;
4226 int quote;
4227
4228 BT_DBG("%s", hdev->name);
4229
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004230 if (!hci_conn_num(hdev, ESCO_LINK))
4231 return;
4232
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004233 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4234 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004235 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4236 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004237 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004238
4239 conn->sent++;
4240 if (conn->sent == ~0)
4241 conn->sent = 0;
4242 }
4243 }
4244}
4245
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004246static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004247{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004248 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004249 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004250 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004251
4252 BT_DBG("%s", hdev->name);
4253
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004254 if (!hci_conn_num(hdev, LE_LINK))
4255 return;
4256
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004257 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004258 /* LE tx timeout must be longer than maximum
4259 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004260 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004261 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004262 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004263 }
4264
4265 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004266 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004267 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004268 u32 priority = (skb_peek(&chan->data_q))->priority;
4269 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004270 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004271 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004272
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004273 /* Stop if priority has changed */
4274 if (skb->priority < priority)
4275 break;
4276
4277 skb = skb_dequeue(&chan->data_q);
4278
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004279 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004280 hdev->le_last_tx = jiffies;
4281
4282 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004283 chan->sent++;
4284 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004285 }
4286 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004287
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004288 if (hdev->le_pkts)
4289 hdev->le_cnt = cnt;
4290 else
4291 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004292
4293 if (cnt != tmp)
4294 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004295}
4296
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004297static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004299 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300 struct sk_buff *skb;
4301
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004302 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004303 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004305 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann52de5992013-09-03 18:08:38 -07004306 /* Schedule queues and send stuff to HCI driver */
4307 hci_sched_acl(hdev);
4308 hci_sched_sco(hdev);
4309 hci_sched_esco(hdev);
4310 hci_sched_le(hdev);
4311 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004312
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313 /* Send next queued raw (unknown type) packet */
4314 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004315 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316}
4317
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004318/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319
4320/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004321static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322{
4323 struct hci_acl_hdr *hdr = (void *) skb->data;
4324 struct hci_conn *conn;
4325 __u16 handle, flags;
4326
4327 skb_pull(skb, HCI_ACL_HDR_SIZE);
4328
4329 handle = __le16_to_cpu(hdr->handle);
4330 flags = hci_flags(handle);
4331 handle = hci_handle(handle);
4332
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004333 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004334 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335
4336 hdev->stat.acl_rx++;
4337
4338 hci_dev_lock(hdev);
4339 conn = hci_conn_hash_lookup_handle(hdev, handle);
4340 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004341
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004343 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004344
Linus Torvalds1da177e2005-04-16 15:20:36 -07004345 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004346 l2cap_recv_acldata(conn, skb, flags);
4347 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004349 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004350 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351 }
4352
4353 kfree_skb(skb);
4354}
4355
4356/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004357static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358{
4359 struct hci_sco_hdr *hdr = (void *) skb->data;
4360 struct hci_conn *conn;
4361 __u16 handle;
4362
4363 skb_pull(skb, HCI_SCO_HDR_SIZE);
4364
4365 handle = __le16_to_cpu(hdr->handle);
4366
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004367 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368
4369 hdev->stat.sco_rx++;
4370
4371 hci_dev_lock(hdev);
4372 conn = hci_conn_hash_lookup_handle(hdev, handle);
4373 hci_dev_unlock(hdev);
4374
4375 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004376 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004377 sco_recv_scodata(conn, skb);
4378 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004379 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004380 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004381 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004382 }
4383
4384 kfree_skb(skb);
4385}
4386
Johan Hedberg9238f362013-03-05 20:37:48 +02004387static bool hci_req_is_complete(struct hci_dev *hdev)
4388{
4389 struct sk_buff *skb;
4390
4391 skb = skb_peek(&hdev->cmd_q);
4392 if (!skb)
4393 return true;
4394
Johan Hedberg44d27132015-11-05 09:31:40 +02004395 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
Johan Hedberg9238f362013-03-05 20:37:48 +02004396}
4397
Johan Hedberg42c6b122013-03-05 20:37:49 +02004398static void hci_resend_last(struct hci_dev *hdev)
4399{
4400 struct hci_command_hdr *sent;
4401 struct sk_buff *skb;
4402 u16 opcode;
4403
4404 if (!hdev->sent_cmd)
4405 return;
4406
4407 sent = (void *) hdev->sent_cmd->data;
4408 opcode = __le16_to_cpu(sent->opcode);
4409 if (opcode == HCI_OP_RESET)
4410 return;
4411
4412 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4413 if (!skb)
4414 return;
4415
4416 skb_queue_head(&hdev->cmd_q, skb);
4417 queue_work(hdev->workqueue, &hdev->cmd_work);
4418}
4419
Johan Hedberge62144872015-04-02 13:41:08 +03004420void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4421 hci_req_complete_t *req_complete,
4422 hci_req_complete_skb_t *req_complete_skb)
Johan Hedberg9238f362013-03-05 20:37:48 +02004423{
Johan Hedberg9238f362013-03-05 20:37:48 +02004424 struct sk_buff *skb;
4425 unsigned long flags;
4426
4427 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4428
Johan Hedberg42c6b122013-03-05 20:37:49 +02004429 /* If the completed command doesn't match the last one that was
4430	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004431 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004432 if (!hci_sent_cmd_data(hdev, opcode)) {
4433		/* Some CSR-based controllers generate a spontaneous
4434 * reset complete event during init and any pending
4435 * command will never be completed. In such a case we
4436 * need to resend whatever was the last sent
4437 * command.
4438 */
4439 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4440 hci_resend_last(hdev);
4441
Johan Hedberg9238f362013-03-05 20:37:48 +02004442 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004443 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004444
4445	/* If the command succeeded and there are still more commands in
4446	 * this request, the request is not yet complete.
4447 */
4448 if (!status && !hci_req_is_complete(hdev))
4449 return;
4450
4451	/* If this was the last command in a request, the complete
4452 * callback would be found in hdev->sent_cmd instead of the
4453 * command queue (hdev->cmd_q).
4454 */
Johan Hedberg44d27132015-11-05 09:31:40 +02004455 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4456 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
Johan Hedberge62144872015-04-02 13:41:08 +03004457 return;
4458 }
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004459
Johan Hedberg44d27132015-11-05 09:31:40 +02004460 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4461 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
Johan Hedberge62144872015-04-02 13:41:08 +03004462 return;
Johan Hedberg9238f362013-03-05 20:37:48 +02004463 }
4464
4465 /* Remove all pending commands belonging to this request */
4466 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4467 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
Johan Hedberg44d27132015-11-05 09:31:40 +02004468 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
Johan Hedberg9238f362013-03-05 20:37:48 +02004469 __skb_queue_head(&hdev->cmd_q, skb);
4470 break;
4471 }
4472
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004473 *req_complete = bt_cb(skb)->hci.req_complete;
4474 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
Johan Hedberg9238f362013-03-05 20:37:48 +02004475 kfree_skb(skb);
4476 }
4477 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
Johan Hedberg9238f362013-03-05 20:37:48 +02004478}
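
/* Usage note (sketch): the event layer calls this when a Command
 * Complete or Command Status event arrives, then invokes whichever of
 * req_complete/req_complete_skb was filled in.
 */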
4479
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004480static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004482 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483 struct sk_buff *skb;
4484
4485 BT_DBG("%s", hdev->name);
4486
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004488 /* Send copy to monitor */
4489 hci_send_to_monitor(hdev, skb);
4490
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491 if (atomic_read(&hdev->promisc)) {
4492 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004493 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494 }
4495
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004496 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497 kfree_skb(skb);
4498 continue;
4499 }
4500
4501 if (test_bit(HCI_INIT, &hdev->flags)) {
4502			/* Don't process data packets in this state. */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01004503 switch (hci_skb_pkt_type(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504 case HCI_ACLDATA_PKT:
4505 case HCI_SCODATA_PKT:
4506 kfree_skb(skb);
4507 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004508 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509 }
4510
4511 /* Process frame */
Marcel Holtmannd79f34e2015-11-05 07:10:00 +01004512 switch (hci_skb_pkt_type(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004514 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515 hci_event_packet(hdev, skb);
4516 break;
4517
4518 case HCI_ACLDATA_PKT:
4519 BT_DBG("%s ACL data packet", hdev->name);
4520 hci_acldata_packet(hdev, skb);
4521 break;
4522
4523 case HCI_SCODATA_PKT:
4524 BT_DBG("%s SCO data packet", hdev->name);
4525 hci_scodata_packet(hdev, skb);
4526 break;
4527
4528 default:
4529 kfree_skb(skb);
4530 break;
4531 }
4532 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004533}
4534
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004535static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004537 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004538 struct sk_buff *skb;
4539
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004540 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4541 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542
Linus Torvalds1da177e2005-04-16 15:20:36 -07004543 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004544 if (atomic_read(&hdev->cmd_cnt)) {
4545 skb = skb_dequeue(&hdev->cmd_q);
4546 if (!skb)
4547 return;
4548
Wei Yongjun7585b972009-02-25 18:29:52 +08004549 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004550
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004551 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004552 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004553 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004554 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004555 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004556 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004557 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004558 schedule_delayed_work(&hdev->cmd_timer,
4559 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560 } else {
4561 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004562 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004563 }
4564 }
4565}