/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
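/* Synchronous requests drive a small state machine on struct hci_dev:
 * req_status moves from HCI_REQ_PEND to either HCI_REQ_DONE (the final
 * command of the request completed) or HCI_REQ_CANCELED, with req_result
 * carrying the HCI status or an errno. hci_req_lock() serializes all such
 * requests on a given controller.
 */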
/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
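/* Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and the
 * controller is hci0):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' puts the controller into Device Under Test mode via
 * HCI_Enable_Device_Under_Test_Mode; writing 'N' leaves it by resetting
 * the controller.
 */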
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
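/* Both helpers above only act while a request is pending: the completion
 * path stores the HCI status (and, if a response skb is available, a
 * reference to it in req_skb), the cancel path stores an errno, and either
 * one wakes the waiter sleeping on req_wait_q in the __hci_*_sync()
 * functions below.
 */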
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
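/* Caller-side sketch: drivers use __hci_cmd_sync() to send one command and
 * block for its completion event, getting the event parameters back as an
 * skb they must free. For example (the vendor opcode 0xfc01 is hypothetical,
 * shown purely for illustration):
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... inspect skb->data ...
 *	kfree_skb(skb);
 */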
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
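/* The callbacks passed to hci_req_sync() just queue commands on the
 * hci_request and return; the whole batch is then run and waited on as
 * one request. A minimal (hypothetical) callback would look like:
 *
 *	static void foo_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 mode = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &mode);
 *	}
 *
 * hci_reset_req() and the other request callbacks below follow exactly
 * this pattern.
 */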
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
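/* Octet/bit layout: events[n] is octet n of the 8-byte Set_Event_Mask
 * parameter, so setting bit b in events[n] enables event mask bit
 * (n * 8 + b). For example, events[4] |= 0x02 above enables bit 33,
 * "Inquiry Result with RSSI".
 */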
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
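/* To summarize the staged bring-up driven by __hci_init():
 *
 *	init1 - reset (unless quirked) plus the basic identity reads
 *	init2 - per-transport setup (BR/EDR and/or LE) and SSP/EIR state
 *	init3 - event masks, link policy and optional LE commands
 *	init4 - remaining optional features (stored keys, codecs, SC, ...)
 *
 * AMP controllers stop after init2, as noted in the code above.
 */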
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
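/* The four single-command callbacks above back the corresponding device
 * ioctls (HCISETSCAN, HCISETAUTH, HCISETENCRYPT and HCISETLINKPOL), which
 * pass the user-supplied value through the opt argument; they are run via
 * hci_req_sync() from the ioctl handling (hci_dev_cmd()) later in this
 * file.
 */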
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
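/* A successful hci_dev_get() returns the device with its reference count
 * raised; callers must balance it with hci_dev_put(), e.g.:
 *
 *	hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */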
1068/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001069
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001070bool hci_discovery_active(struct hci_dev *hdev)
1071{
1072 struct discovery_state *discov = &hdev->discovery;
1073
Andre Guedes6fbe1952012-02-03 17:47:58 -03001074 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001075 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001076 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001077 return true;
1078
Andre Guedes6fbe1952012-02-03 17:47:58 -03001079 default:
1080 return false;
1081 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001082}
1083
Johan Hedbergff9ef572012-01-04 14:23:45 +02001084void hci_discovery_set_state(struct hci_dev *hdev, int state)
1085{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001086 int old_state = hdev->discovery.state;
1087
Johan Hedbergff9ef572012-01-04 14:23:45 +02001088 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1089
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001090 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001091 return;
1092
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001093 hdev->discovery.state = state;
1094
Johan Hedbergff9ef572012-01-04 14:23:45 +02001095 switch (state) {
1096 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001097 hci_update_background_scan(hdev);
1098
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001099 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001100 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001101 break;
1102 case DISCOVERY_STARTING:
1103 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001104 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001105 mgmt_discovering(hdev, 1);
1106 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001107 case DISCOVERY_RESOLVING:
1108 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001109 case DISCOVERY_STOPPING:
1110 break;
1111 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001112}
1113
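/* Discovery normally walks STOPPED -> STARTING -> FINDING (optionally
 * RESOLVING for remote name lookups) -> STOPPING -> STOPPED.
 * mgmt_discovering() notifies userspace only on the edges into FINDING
 * and back to STOPPED, so an attempt aborted while still STARTING does
 * not generate a "stopped" event.
 */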
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
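/* The re-insertion above keeps cache->resolve sorted by ascending |RSSI|
 * (entries whose resolution is already pending stay put), so that remote
 * name requests are issued for the strongest, most likely reachable
 * devices first.
 */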
Marcel Holtmannaf589252014-07-01 14:11:20 +02001198u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1199 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200{
Johan Hedberg30883512012-01-04 14:16:21 +02001201 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001202 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02001203 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001205 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
Johan Hedberg6928a922014-10-26 20:46:09 +01001207 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
Szymon Janc2b2fec42012-11-20 11:38:54 +01001208
Marcel Holtmannaf589252014-07-01 14:11:20 +02001209 if (!data->ssp_mode)
1210 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001211
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001212 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001213 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02001214 if (!ie->data.ssp_mode)
1215 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001216
Johan Hedberga3d4e202012-01-09 00:53:02 +02001217 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001218 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001219 ie->data.rssi = data->rssi;
1220 hci_inquiry_cache_update_resolve(hdev, ie);
1221 }
1222
Johan Hedberg561aafb2012-01-04 13:31:59 +02001223 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001224 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001225
Johan Hedberg561aafb2012-01-04 13:31:59 +02001226 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03001227 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02001228 if (!ie) {
1229 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1230 goto done;
1231 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001232
1233 list_add(&ie->all, &cache->all);
1234
1235 if (name_known) {
1236 ie->name_state = NAME_KNOWN;
1237 } else {
1238 ie->name_state = NAME_NOT_KNOWN;
1239 list_add(&ie->list, &cache->unknown);
1240 }
1241
1242update:
1243 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001244 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001245 ie->name_state = NAME_KNOWN;
1246 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 }
1248
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001249 memcpy(&ie->data, data, sizeof(*data));
1250 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001252
1253 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02001254 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02001255
Marcel Holtmannaf589252014-07-01 14:11:20 +02001256done:
1257 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258}
1259
1260static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1261{
Johan Hedberg30883512012-01-04 14:16:21 +02001262 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 struct inquiry_info *info = (struct inquiry_info *) buf;
1264 struct inquiry_entry *e;
1265 int copied = 0;
1266
Johan Hedberg561aafb2012-01-04 13:31:59 +02001267 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001269
1270 if (copied >= num)
1271 break;
1272
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273 bacpy(&info->bdaddr, &data->bdaddr);
1274 info->pscan_rep_mode = data->pscan_rep_mode;
1275 info->pscan_period_mode = data->pscan_period_mode;
1276 info->pscan_mode = data->pscan_mode;
1277 memcpy(info->dev_class, data->dev_class, 3);
1278 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001279
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001281 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 }
1283
1284 BT_DBG("cache %p, copied %d", cache, copied);
1285 return copied;
1286}
1287
Johan Hedberg42c6b122013-03-05 20:37:49 +02001288static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289{
1290 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001291 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 struct hci_cp_inquiry cp;
1293
1294 BT_DBG("%s", hdev->name);
1295
1296 if (test_bit(HCI_INQUIRY, &hdev->flags))
1297 return;
1298
1299 /* Start Inquiry */
1300 memcpy(&cp.lap, &ir->lap, 3);
1301 cp.length = ir->length;
1302 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001303 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304}
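/* hci_inq_req() is a request builder that hci_inquiry() below runs
 * through hci_req_sync(). The same pattern works for any synchronous
 * HCI command sequence; a minimal sketch (the foo_req name is
 * illustrative):
 *
 *	static void foo_req(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	}
 *
 *	err = hci_req_sync(hdev, foo_req, 0, HCI_INIT_TIMEOUT);
 */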
1305
1306int hci_inquiry(void __user *arg)
1307{
1308 __u8 __user *ptr = arg;
1309 struct hci_inquiry_req ir;
1310 struct hci_dev *hdev;
1311 int err = 0, do_inquiry = 0, max_rsp;
1312 long timeo;
1313 __u8 *buf;
1314
1315 if (copy_from_user(&ir, ptr, sizeof(ir)))
1316 return -EFAULT;
1317
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001318 hdev = hci_dev_get(ir.dev_id);
1319 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 return -ENODEV;
1321
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001322 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001323 err = -EBUSY;
1324 goto done;
1325 }
1326
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001327 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001328 err = -EOPNOTSUPP;
1329 goto done;
1330 }
1331
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001332 if (hdev->dev_type != HCI_BREDR) {
1333 err = -EOPNOTSUPP;
1334 goto done;
1335 }
1336
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001337 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001338 err = -EOPNOTSUPP;
1339 goto done;
1340 }
1341
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001342 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001343 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001344 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001345 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 do_inquiry = 1;
1347 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001348 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349
Marcel Holtmann04837f62006-07-03 10:02:33 +02001350 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001351
1352 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001353 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1354 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001355 if (err < 0)
1356 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001357
1358 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1359 * cleared). If it is interrupted by a signal, return -EINTR.
1360 */
NeilBrown74316202014-07-07 15:16:04 +10001361 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001362 TASK_INTERRUPTIBLE))
1363 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001364 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001366	/* For an unlimited number of responses we will use a buffer
1367	 * with 255 entries, the maximum that ir.num_rsp can express.
1368	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1370
1371	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
1372	 * and then copy it to user space.
1373	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001374 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001375 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 err = -ENOMEM;
1377 goto done;
1378 }
1379
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001380 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001382 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383
1384 BT_DBG("num_rsp %d", ir.num_rsp);
1385
1386 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1387 ptr += sizeof(ir);
1388 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001389 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001391	} else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392		err = -EFAULT;
	}
1393
1394 kfree(buf);
1395
1396done:
1397 hci_dev_put(hdev);
1398 return err;
1399}
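/* Userspace enters hci_inquiry() through the HCIINQUIRY ioctl on a raw
 * HCI socket. A minimal caller sketch, assuming the BlueZ library
 * definitions of struct hci_inquiry_req and HCIINQUIRY (illustrative
 * only):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir.dev_id = 0, .ir.length = 8, .ir.num_rsp = 255 };
 *
 *	memcpy(buf.ir.lap, (uint8_t []){ 0x33, 0x8b, 0x9e }, 3); // GIAC
 *	buf.ir.flags = IREQ_CACHE_FLUSH;
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)	// dd: raw HCI socket fd
 *		perror("HCIINQUIRY");
 */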
1400
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001401static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 int ret = 0;
1404
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 BT_DBG("%s %p", hdev->name, hdev);
1406
1407 hci_req_lock(hdev);
1408
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001409 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001410 ret = -ENODEV;
1411 goto done;
1412 }
1413
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001414 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1415 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001416 /* Check for rfkill but allow the HCI setup stage to
1417 * proceed (which in itself doesn't cause any RF activity).
1418 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001419 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001420 ret = -ERFKILL;
1421 goto done;
1422 }
1423
1424 /* Check for valid public address or a configured static
1425	 * random address, but let the HCI setup proceed to
1426 * be able to determine if there is a public address
1427 * or not.
1428 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001429 * In case of user channel usage, it is not important
1430 * if a public address or static random address is
1431 * available.
1432 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001433 * This check is only valid for BR/EDR controllers
1434 * since AMP controllers do not have an address.
1435 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001436 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001437 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001438 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1439 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1440 ret = -EADDRNOTAVAIL;
1441 goto done;
1442 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001443 }
1444
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 if (test_bit(HCI_UP, &hdev->flags)) {
1446 ret = -EALREADY;
1447 goto done;
1448 }
1449
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 if (hdev->open(hdev)) {
1451 ret = -EIO;
1452 goto done;
1453 }
1454
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001455 set_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001456 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001457
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001458 atomic_set(&hdev->cmd_cnt, 1);
1459 set_bit(HCI_INIT, &hdev->flags);
1460
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001461 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmanne131d742015-10-20 02:30:47 +02001462 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1463
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001464 if (hdev->setup)
1465 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001466
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001467 /* The transport driver can set these quirks before
1468 * creating the HCI device or in its setup callback.
1469 *
1470 * In case any of them is set, the controller has to
1471 * start up as unconfigured.
1472 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001473 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1474 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001475 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001476
1477 /* For an unconfigured controller it is required to
1478 * read at least the version information provided by
1479 * the Read Local Version Information command.
1480 *
1481 * If the set_bdaddr driver callback is provided, then
1482 * also the original Bluetooth public device address
1483 * will be read using the Read BD Address command.
1484 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001485 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001486 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001487 }
1488
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001489 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001490 /* If public address change is configured, ensure that
1491 * the address gets programmed. If the driver does not
1492 * support changing the public address, fail the power
1493 * on procedure.
1494 */
1495 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1496 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001497 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1498 else
1499 ret = -EADDRNOTAVAIL;
1500 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001501
1502 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001503 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001504 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001505 ret = __hci_init(hdev);
Marcel Holtmann98a63aa2015-10-20 23:25:42 +02001506 if (!ret && hdev->post_init)
1507 ret = hdev->post_init(hdev);
1508 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 }
1510
Marcel Holtmann7e995b92015-10-17 16:00:26 +02001511 /* If the HCI Reset command is clearing all diagnostic settings,
1512 * then they need to be reprogrammed after the init procedure
1513	 * has completed.
1514 */
1515 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1516 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1517 ret = hdev->set_diag(hdev, true);
1518
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001519 clear_bit(HCI_INIT, &hdev->flags);
1520
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 if (!ret) {
1522 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001523 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 set_bit(HCI_UP, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001525 hci_sock_dev_event(hdev, HCI_DEV_UP);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001526 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1527 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1528 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1529 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001530 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001531 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001532 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001533 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001534 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001535 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001537 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001538 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001539 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
1541 skb_queue_purge(&hdev->cmd_q);
1542 skb_queue_purge(&hdev->rx_q);
1543
1544 if (hdev->flush)
1545 hdev->flush(hdev);
1546
1547 if (hdev->sent_cmd) {
1548 kfree_skb(hdev->sent_cmd);
1549 hdev->sent_cmd = NULL;
1550 }
1551
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001552 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001553 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001554
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001556 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 }
1558
1559done:
1560 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 return ret;
1562}
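/* The open(), setup(), set_bdaddr(), set_diag() and post_init() calls
 * above are transport driver callbacks wired up before registration.
 * A hypothetical driver sets them up roughly like this (sketch only;
 * the foo_* names are illustrative):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	hdev->open  = foo_open;		// power up the transport
 *	hdev->close = foo_close;
 *	hdev->flush = foo_flush;
 *	hdev->send  = foo_send;		// hand an skb to the hardware
 *	hdev->setup = foo_setup;	// one-time init while HCI_SETUP is set
 *	hdev->set_bdaddr = foo_set_bdaddr;
 *
 *	hci_register_dev(hdev);
 */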
1563
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001564/* ---- HCI ioctl helpers ---- */
1565
1566int hci_dev_open(__u16 dev)
1567{
1568 struct hci_dev *hdev;
1569 int err;
1570
1571 hdev = hci_dev_get(dev);
1572 if (!hdev)
1573 return -ENODEV;
1574
Marcel Holtmann4a964402014-07-02 19:10:33 +02001575 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001576 * up as user channel. Trying to bring them up as normal devices
1577	 * will result in a failure. Only user channel operation is
1578 * possible.
1579 *
1580 * When this function is called for a user channel, the flag
1581 * HCI_USER_CHANNEL will be set first before attempting to
1582 * open the device.
1583 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001584 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1585 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001586 err = -EOPNOTSUPP;
1587 goto done;
1588 }
1589
Johan Hedberge1d08f42013-10-01 22:44:50 +03001590 /* We need to ensure that no other power on/off work is pending
1591 * before proceeding to call hci_dev_do_open. This is
1592 * particularly important if the setup procedure has not yet
1593 * completed.
1594 */
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001595 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Johan Hedberge1d08f42013-10-01 22:44:50 +03001596 cancel_delayed_work(&hdev->power_off);
1597
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001598 /* After this call it is guaranteed that the setup procedure
1599 * has finished. This means that error conditions like RFKILL
1600 * or no valid public or static random address apply.
1601 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001602 flush_workqueue(hdev->req_workqueue);
1603
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001604	/* For controllers that do not use the management interface and
Johan Hedbergb6ae8452014-07-30 09:22:22 +03001605	 * are brought up using the legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001606 * so that pairing works for them. Once the management interface
1607 * is in use this bit will be cleared again and userspace has
1608 * to explicitly enable it.
1609 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001610 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1611 !hci_dev_test_flag(hdev, HCI_MGMT))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001612 hci_dev_set_flag(hdev, HCI_BONDABLE);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001613
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001614 err = hci_dev_do_open(hdev);
1615
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001616done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001617 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001618 return err;
1619}
1620
Johan Hedbergd7347f32014-07-04 12:37:23 +03001621/* This function requires the caller holds hdev->lock */
1622static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1623{
1624 struct hci_conn_params *p;
1625
Johan Hedbergf161dd42014-08-15 21:06:54 +03001626 list_for_each_entry(p, &hdev->le_conn_params, list) {
1627 if (p->conn) {
1628 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001629 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001630 p->conn = NULL;
1631 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001632 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001633 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001634
1635 BT_DBG("All LE pending actions cleared");
1636}
1637
Simon Fels6b3cc1d2015-09-02 12:10:12 +02001638int hci_dev_do_close(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639{
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001640 bool auto_off;
1641
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 BT_DBG("%s %p", hdev->name, hdev);
1643
Gabriele Mazzottad24d8142015-04-26 20:51:50 +02001644 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
Loic Poulain867146a2015-06-09 11:46:30 +02001645 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Gabriele Mazzottad24d8142015-04-26 20:51:50 +02001646 test_bit(HCI_UP, &hdev->flags)) {
Tedd Ho-Jeong Ana44fecb2015-02-13 09:20:50 -08001647 /* Execute vendor specific shutdown routine */
1648 if (hdev->shutdown)
1649 hdev->shutdown(hdev);
1650 }
1651
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001652 cancel_delayed_work(&hdev->power_off);
1653
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 hci_req_cancel(hdev, ENODEV);
1655 hci_req_lock(hdev);
1656
1657 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001658 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 hci_req_unlock(hdev);
1660 return 0;
1661 }
1662
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001663 /* Flush RX and TX works */
1664 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001665 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001667 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001668 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001669 hdev->discov_timeout = 0;
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001670 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1671 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001672 }
1673
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001674 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
Johan Hedberg7d785252011-12-15 00:47:39 +02001675 cancel_delayed_work(&hdev->service_cache);
1676
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001677 cancel_delayed_work_sync(&hdev->le_scan_disable);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08001678 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedberg4518bb02014-02-24 20:35:07 +02001679
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001680 if (hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg4518bb02014-02-24 20:35:07 +02001681 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001682
Florian Grandel5d900e42015-06-18 03:16:35 +02001683 if (hdev->adv_instance_timeout) {
1684 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1685 hdev->adv_instance_timeout = 0;
1686 }
1687
Johan Hedberg76727c02014-11-18 09:00:14 +02001688 /* Avoid potential lockdep warnings from the *_flush() calls by
1689 * ensuring the workqueue is empty up front.
1690 */
1691 drain_workqueue(hdev->workqueue);
1692
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001693 hci_dev_lock(hdev);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001694
Johan Hedberg8f502f82015-01-28 19:56:02 +02001695 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1696
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001697 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1698
1699 if (!auto_off && hdev->dev_type == HCI_BREDR)
1700 mgmt_powered(hdev, 0);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001701
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001702 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03001703 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001704 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001705 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706
Marcel Holtmann64dae962015-01-28 14:10:28 -08001707 smp_unregister(hdev);
1708
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001709 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
1711 if (hdev->flush)
1712 hdev->flush(hdev);
1713
1714 /* Reset device */
1715 skb_queue_purge(&hdev->cmd_q);
1716 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmannacc649c2015-10-08 01:53:55 +02001717 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1718 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001720 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 clear_bit(HCI_INIT, &hdev->flags);
1722 }
1723
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001724 /* flush cmd work */
1725 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726
1727 /* Drop queues */
1728 skb_queue_purge(&hdev->rx_q);
1729 skb_queue_purge(&hdev->cmd_q);
1730 skb_queue_purge(&hdev->raw_q);
1731
1732 /* Drop last sent command */
1733 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001734 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 kfree_skb(hdev->sent_cmd);
1736 hdev->sent_cmd = NULL;
1737 }
1738
Marcel Holtmanne9ca8bf2015-10-04 23:34:02 +02001739 clear_bit(HCI_RUNNING, &hdev->flags);
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01001740 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
Marcel Holtmann4a3f95b2015-10-04 23:34:00 +02001741
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 /* After this point our queues are empty
1743 * and no tasks are scheduled. */
1744 hdev->close(hdev);
1745
Johan Hedberg35b973c2013-03-15 17:06:59 -05001746 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001747 hdev->flags &= BIT(HCI_RAW);
Marcel Holtmanneacb44d2015-03-13 09:04:17 -07001748 hci_dev_clear_volatile_flags(hdev);
Johan Hedberg35b973c2013-03-15 17:06:59 -05001749
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001750 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001751 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001752
Johan Hedberge59fda82012-02-22 18:11:53 +02001753 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001754 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001755 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02001756
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 hci_req_unlock(hdev);
1758
1759 hci_dev_put(hdev);
1760 return 0;
1761}
1762
1763int hci_dev_close(__u16 dev)
1764{
1765 struct hci_dev *hdev;
1766 int err;
1767
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001768 hdev = hci_dev_get(dev);
1769 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001771
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001772 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001773 err = -EBUSY;
1774 goto done;
1775 }
1776
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001777 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001778 cancel_delayed_work(&hdev->power_off);
1779
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001781
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001782done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 hci_dev_put(hdev);
1784 return err;
1785}
1786
Marcel Holtmann5c912492015-01-28 11:53:05 -08001787static int hci_dev_do_reset(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788{
Marcel Holtmann5c912492015-01-28 11:53:05 -08001789 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Marcel Holtmann5c912492015-01-28 11:53:05 -08001791 BT_DBG("%s %p", hdev->name, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792
1793 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 /* Drop queues */
1796 skb_queue_purge(&hdev->rx_q);
1797 skb_queue_purge(&hdev->cmd_q);
1798
Johan Hedberg76727c02014-11-18 09:00:14 +02001799 /* Avoid potential lockdep warnings from the *_flush() calls by
1800 * ensuring the workqueue is empty up front.
1801 */
1802 drain_workqueue(hdev->workqueue);
1803
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001804 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001805 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001807 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
1809 if (hdev->flush)
1810 hdev->flush(hdev);
1811
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001812 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001813 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001815 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 return ret;
1819}
1820
Marcel Holtmann5c912492015-01-28 11:53:05 -08001821int hci_dev_reset(__u16 dev)
1822{
1823 struct hci_dev *hdev;
1824 int err;
1825
1826 hdev = hci_dev_get(dev);
1827 if (!hdev)
1828 return -ENODEV;
1829
1830 if (!test_bit(HCI_UP, &hdev->flags)) {
1831 err = -ENETDOWN;
1832 goto done;
1833 }
1834
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001835 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001836 err = -EBUSY;
1837 goto done;
1838 }
1839
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001840 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001841 err = -EOPNOTSUPP;
1842 goto done;
1843 }
1844
1845 err = hci_dev_do_reset(hdev);
1846
1847done:
1848 hci_dev_put(hdev);
1849 return err;
1850}
1851
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852int hci_dev_reset_stat(__u16 dev)
1853{
1854 struct hci_dev *hdev;
1855 int ret = 0;
1856
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001857 hdev = hci_dev_get(dev);
1858 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 return -ENODEV;
1860
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001861 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001862 ret = -EBUSY;
1863 goto done;
1864 }
1865
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001866 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001867 ret = -EOPNOTSUPP;
1868 goto done;
1869 }
1870
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1872
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001873done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 return ret;
1876}
1877
Johan Hedberg123abc02014-07-10 12:09:07 +03001878static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1879{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001880 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001881
1882 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1883
1884 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001885 conn_changed = !hci_dev_test_and_set_flag(hdev,
1886 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001887 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001888 conn_changed = hci_dev_test_and_clear_flag(hdev,
1889 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001890
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001891 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001892 discov_changed = !hci_dev_test_and_set_flag(hdev,
1893 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001894 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001895 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001896 discov_changed = hci_dev_test_and_clear_flag(hdev,
1897 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001898 }
1899
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001900 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001901 return;
1902
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001903 if (conn_changed || discov_changed) {
1904 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001905 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001906
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001907 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001908 mgmt_update_adv_data(hdev);
1909
Johan Hedberg123abc02014-07-10 12:09:07 +03001910 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001911 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001912}
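/* The scan argument mirrors the HCI Write Scan Enable values from
 * hci.h, so the mapping to the connection flags is:
 *
 *	hci_update_scan_state(hdev, SCAN_DISABLED);		// 0x00
 *	hci_update_scan_state(hdev, SCAN_INQUIRY);		// 0x01 -> discoverable
 *	hci_update_scan_state(hdev, SCAN_PAGE);			// 0x02 -> connectable
 *	hci_update_scan_state(hdev, SCAN_INQUIRY | SCAN_PAGE);	// both
 */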
1913
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914int hci_dev_cmd(unsigned int cmd, void __user *arg)
1915{
1916 struct hci_dev *hdev;
1917 struct hci_dev_req dr;
1918 int err = 0;
1919
1920 if (copy_from_user(&dr, arg, sizeof(dr)))
1921 return -EFAULT;
1922
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001923 hdev = hci_dev_get(dr.dev_id);
1924 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 return -ENODEV;
1926
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001927 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001928 err = -EBUSY;
1929 goto done;
1930 }
1931
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001932 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001933 err = -EOPNOTSUPP;
1934 goto done;
1935 }
1936
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001937 if (hdev->dev_type != HCI_BREDR) {
1938 err = -EOPNOTSUPP;
1939 goto done;
1940 }
1941
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001942 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001943 err = -EOPNOTSUPP;
1944 goto done;
1945 }
1946
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 switch (cmd) {
1948 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001949 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1950 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 break;
1952
1953 case HCISETENCRYPT:
1954 if (!lmp_encrypt_capable(hdev)) {
1955 err = -EOPNOTSUPP;
1956 break;
1957 }
1958
1959 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1960 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001961 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1962 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 if (err)
1964 break;
1965 }
1966
Johan Hedberg01178cd2013-03-05 20:37:41 +02001967 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1968 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 break;
1970
1971 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001972 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1973 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03001974
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001975 /* Ensure that the connectable and discoverable states
1976 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03001977 */
Johan Hedberg123abc02014-07-10 12:09:07 +03001978 if (!err)
1979 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 break;
1981
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001982 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001983 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1984 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001985 break;
1986
1987 case HCISETLINKMODE:
1988 hdev->link_mode = ((__u16) dr.dev_opt) &
1989 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1990 break;
1991
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 case HCISETPTYPE:
1993 hdev->pkt_type = (__u16) dr.dev_opt;
1994 break;
1995
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001997 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1998 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 break;
2000
2001 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002002 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2003 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 break;
2005
2006 default:
2007 err = -EINVAL;
2008 break;
2009 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002010
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002011done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 hci_dev_put(hdev);
2013 return err;
2014}
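/* These commands back the legacy ioctls used by tools like hciconfig.
 * For example "hciconfig hci0 piscan" boils down to roughly this
 * (sketch, assuming the BlueZ definitions):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,				// hci0
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(dd, HCISETSCAN, (unsigned long) &dr);
 */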
2015
2016int hci_get_dev_list(void __user *arg)
2017{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002018 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 struct hci_dev_list_req *dl;
2020 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 int n = 0, size, err;
2022 __u16 dev_num;
2023
2024 if (get_user(dev_num, (__u16 __user *) arg))
2025 return -EFAULT;
2026
2027 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2028 return -EINVAL;
2029
2030 size = sizeof(*dl) + dev_num * sizeof(*dr);
2031
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002032 dl = kzalloc(size, GFP_KERNEL);
2033 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 return -ENOMEM;
2035
2036 dr = dl->dev_req;
2037
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002038 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002039 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002040 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002041
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002042 /* When the auto-off is configured it means the transport
2043		 * is running, but in that case we still indicate that the
2044 * device is actually down.
2045 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002046 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002047 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002048
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002050 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002051
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 if (++n >= dev_num)
2053 break;
2054 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002055 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056
2057 dl->dev_num = n;
2058 size = sizeof(*dl) + n * sizeof(*dr);
2059
2060 err = copy_to_user(arg, dl, size);
2061 kfree(dl);
2062
2063 return err ? -EFAULT : 0;
2064}
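/* A userspace enumeration sketch matching the layout consumed above
 * (assuming the BlueZ definitions of HCIGETDEVLIST and HCI_MAX_DEV;
 * illustrative only):
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i;
 *
 *	dl = malloc(HCI_MAX_DEV * sizeof(*dr) + sizeof(*dl));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (!ioctl(dd, HCIGETDEVLIST, (void *) dl))
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n", dr[i].dev_id,
 *			       dr[i].dev_opt);
 */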
2065
2066int hci_get_dev_info(void __user *arg)
2067{
2068 struct hci_dev *hdev;
2069 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002070 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 int err = 0;
2072
2073 if (copy_from_user(&di, arg, sizeof(di)))
2074 return -EFAULT;
2075
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002076 hdev = hci_dev_get(di.dev_id);
2077 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 return -ENODEV;
2079
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002080 /* When the auto-off is configured it means the transport
2081	 * is running, but in that case we still indicate that the
2082 * device is actually down.
2083 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002084 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002085 flags = hdev->flags & ~BIT(HCI_UP);
2086 else
2087 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002088
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 strcpy(di.name, hdev->name);
2090 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002091 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002092 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002094 if (lmp_bredr_capable(hdev)) {
2095 di.acl_mtu = hdev->acl_mtu;
2096 di.acl_pkts = hdev->acl_pkts;
2097 di.sco_mtu = hdev->sco_mtu;
2098 di.sco_pkts = hdev->sco_pkts;
2099 } else {
2100 di.acl_mtu = hdev->le_mtu;
2101 di.acl_pkts = hdev->le_pkts;
2102 di.sco_mtu = 0;
2103 di.sco_pkts = 0;
2104 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 di.link_policy = hdev->link_policy;
2106 di.link_mode = hdev->link_mode;
2107
2108 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2109 memcpy(&di.features, &hdev->features, sizeof(di.features));
2110
2111 if (copy_to_user(arg, &di, sizeof(di)))
2112 err = -EFAULT;
2113
2114 hci_dev_put(hdev);
2115
2116 return err;
2117}
2118
2119/* ---- Interface to HCI drivers ---- */
2120
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002121static int hci_rfkill_set_block(void *data, bool blocked)
2122{
2123 struct hci_dev *hdev = data;
2124
2125 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2126
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002127 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002128 return -EBUSY;
2129
Johan Hedberg5e130362013-09-13 08:58:17 +03002130 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002131 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002132 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2133 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002134 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002135 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002136 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002137 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002138
2139 return 0;
2140}
2141
2142static const struct rfkill_ops hci_rfkill_ops = {
2143 .set_block = hci_rfkill_set_block,
2144};
2145
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002146static void hci_power_on(struct work_struct *work)
2147{
2148 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002149 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002150
2151 BT_DBG("%s", hdev->name);
2152
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002153 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002154 if (err < 0) {
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302155 hci_dev_lock(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002156 mgmt_set_powered_failed(hdev, err);
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302157 hci_dev_unlock(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002158 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002159 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002160
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002161 /* During the HCI setup phase, a few error conditions are
2162 * ignored and they need to be checked now. If they are still
2163 * valid, it is important to turn the device back off.
2164 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002165 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2166 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002167 (hdev->dev_type == HCI_BREDR &&
2168 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2169 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002170 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
Johan Hedbergbf543032013-09-13 08:58:18 +03002171 hci_dev_do_close(hdev);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002172 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002173 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2174 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002175 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002176
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002177 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002178 /* For unconfigured devices, set the HCI_RAW flag
2179 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002180 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002181 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann4a964402014-07-02 19:10:33 +02002182 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002183
2184 /* For fully configured devices, this will send
2185 * the Index Added event. For unconfigured devices,
2186		 * it will send the Unconfigured Index Added event.
2187 *
2188 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2189		 * and no event will be sent.
2190 */
Johan Hedberg744cf192011-11-08 20:40:14 +02002191 mgmt_index_added(hdev);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002192 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002193 /* When the controller is now configured, then it
2194 * is important to clear the HCI_RAW flag.
2195 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002196 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002197 clear_bit(HCI_RAW, &hdev->flags);
2198
Marcel Holtmannd603b762014-07-06 12:11:14 +02002199 /* Powering on the controller with HCI_CONFIG set only
2200 * happens with the transition from unconfigured to
2201 * configured. This will send the Index Added event.
2202 */
2203 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002204 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002205}
2206
2207static void hci_power_off(struct work_struct *work)
2208{
Johan Hedberg32435532011-11-07 22:16:04 +02002209 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002210 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002211
2212 BT_DBG("%s", hdev->name);
2213
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002214 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002215}
2216
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002217static void hci_error_reset(struct work_struct *work)
2218{
2219 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2220
2221 BT_DBG("%s", hdev->name);
2222
2223 if (hdev->hw_error)
2224 hdev->hw_error(hdev, hdev->hw_error_code);
2225 else
2226 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2227 hdev->hw_error_code);
2228
2229 if (hci_dev_do_close(hdev))
2230 return;
2231
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002232 hci_dev_do_open(hdev);
2233}
2234
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002235static void hci_discov_off(struct work_struct *work)
2236{
2237 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002238
2239 hdev = container_of(work, struct hci_dev, discov_off.work);
2240
2241 BT_DBG("%s", hdev->name);
2242
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002243 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002244}
2245
Florian Grandel5d900e42015-06-18 03:16:35 +02002246static void hci_adv_timeout_expire(struct work_struct *work)
2247{
2248 struct hci_dev *hdev;
2249
2250 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2251
2252 BT_DBG("%s", hdev->name);
2253
2254 mgmt_adv_timeout_expired(hdev);
2255}
2256
Johan Hedberg35f74982014-02-18 17:14:32 +02002257void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002258{
Johan Hedberg48210022013-01-27 00:31:28 +02002259 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002260
Johan Hedberg48210022013-01-27 00:31:28 +02002261 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2262 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002263 kfree(uuid);
2264 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002265}
2266
Johan Hedberg35f74982014-02-18 17:14:32 +02002267void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002268{
Johan Hedberg0378b592014-11-19 15:22:22 +02002269 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002270
Johan Hedberg0378b592014-11-19 15:22:22 +02002271 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2272 list_del_rcu(&key->list);
2273 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002274 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002275}
2276
Johan Hedberg35f74982014-02-18 17:14:32 +02002277void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002278{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002279 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002280
Johan Hedberg970d0f12014-11-13 14:37:47 +02002281 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2282 list_del_rcu(&k->list);
2283 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002284 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002285}
2286
Johan Hedberg970c4e42014-02-18 10:19:33 +02002287void hci_smp_irks_clear(struct hci_dev *hdev)
2288{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002289 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002290
Johan Hedbergadae20c2014-11-13 14:37:48 +02002291 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2292 list_del_rcu(&k->list);
2293 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002294 }
2295}
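/* The three clear helpers above pair with the rcu_read_lock() lookups
 * below: entries leave the list via list_del_rcu() and are freed only
 * after a grace period via kfree_rcu(), so a concurrent reader that
 * already holds a pointer never touches freed memory.
 */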
2296
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002297struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2298{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002299 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002300
Johan Hedberg0378b592014-11-19 15:22:22 +02002301 rcu_read_lock();
2302 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2303 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2304 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002305 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002306 }
2307 }
2308 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002309
2310 return NULL;
2311}
2312
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302313static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002314 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002315{
2316 /* Legacy key */
2317 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302318 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002319
2320 /* Debug keys are insecure so don't store them persistently */
2321 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302322 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002323
2324 /* Changed combination key and there's no previous one */
2325 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302326 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002327
2328 /* Security mode 3 case */
2329 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302330 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002331
Johan Hedberge3befab2014-06-01 16:33:39 +03002332 /* BR/EDR key derived using SC from an LE link */
2333 if (conn->type == LE_LINK)
2334 return true;
2335
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002336	/* Neither the local nor the remote side requested no-bonding */
2337 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302338 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002339
2340 /* Local side had dedicated bonding as requirement */
2341 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302342 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002343
2344 /* Remote side had dedicated bonding as requirement */
2345 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302346 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002347
2348 /* If none of the above criteria match, then don't store the key
2349 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302350 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002351}
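/* A worked example of the rules above: an unauthenticated combination
 * key created during dedicated bonding (conn->auth_type == 0x02 or
 * 0x03) is stored persistently, while the same key type negotiated
 * with no-bonding on both sides (auth_type and remote_auth <= 0x01,
 * no security mode 3) is kept only for the lifetime of the connection.
 */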
2352
Johan Hedberge804d252014-07-16 11:42:28 +03002353static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002354{
Johan Hedberge804d252014-07-16 11:42:28 +03002355 if (type == SMP_LTK)
2356 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002357
Johan Hedberge804d252014-07-16 11:42:28 +03002358 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002359}
2360
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002361struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2362 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002363{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002364 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002365
Johan Hedberg970d0f12014-11-13 14:37:47 +02002366 rcu_read_lock();
2367 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002368 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2369 continue;
2370
Johan Hedberg923e2412014-12-03 12:43:39 +02002371 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002372 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002373 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002374 }
2375 }
2376 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002377
2378 return NULL;
2379}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002380
Johan Hedberg970c4e42014-02-18 10:19:33 +02002381struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2382{
2383 struct smp_irk *irk;
2384
Johan Hedbergadae20c2014-11-13 14:37:48 +02002385 rcu_read_lock();
2386 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2387 if (!bacmp(&irk->rpa, rpa)) {
2388 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002389 return irk;
2390 }
2391 }
2392
Johan Hedbergadae20c2014-11-13 14:37:48 +02002393 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2394 if (smp_irk_matches(hdev, irk->val, rpa)) {
2395 bacpy(&irk->rpa, rpa);
2396 rcu_read_unlock();
2397 return irk;
2398 }
2399 }
2400 rcu_read_unlock();
2401
Johan Hedberg970c4e42014-02-18 10:19:33 +02002402 return NULL;
2403}
2404
2405struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2406 u8 addr_type)
2407{
2408 struct smp_irk *irk;
2409
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002410 /* Identity Address must be public or static random */
2411 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2412 return NULL;
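	/* Static random addresses have the two most significant bits set
	 * to 0b11, and bdaddr_t is stored little endian, so b[5] holds
	 * the most significant byte: any identity address whose top byte
	 * falls in the 0xc0-0xff range passes the check above.
	 */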
2413
Johan Hedbergadae20c2014-11-13 14:37:48 +02002414 rcu_read_lock();
2415 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002416 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002417 bacmp(bdaddr, &irk->bdaddr) == 0) {
2418 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002419 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002420 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002421 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002422 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002423
2424 return NULL;
2425}
2426
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002427struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002428 bdaddr_t *bdaddr, u8 *val, u8 type,
2429 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002430{
2431 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302432 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002433
2434 old_key = hci_find_link_key(hdev, bdaddr);
2435 if (old_key) {
2436 old_key_type = old_key->type;
2437 key = old_key;
2438 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002439 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002440 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002441 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002442 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02002443 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002444 }
2445
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002446 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002447
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002448 /* Some buggy controller combinations generate a changed
2449 * combination key for legacy pairing even when there's no
2450 * previous key */
2451 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002452 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002453 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002454 if (conn)
2455 conn->key_type = type;
2456 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002457
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002458 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002459 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002460 key->pin_len = pin_len;
2461
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002462 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002463 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002464 else
2465 key->type = type;
2466
Johan Hedberg7652ff62014-06-24 13:15:49 +03002467 if (persistent)
2468 *persistent = hci_persistent_key(hdev, conn, type,
2469 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002470
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002471 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002472}
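/* A caller sketch, modelled on the Link Key Notification event handler
 * (the exact flow in hci_event.c may differ):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key && hci_dev_test_flag(hdev, HCI_MGMT))
 *		mgmt_new_link_key(hdev, key, persistent);
 */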
2473
Johan Hedbergca9142b2014-02-19 14:57:44 +02002474struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002475 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002476 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002477{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002478 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002479 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002480
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002481 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002482 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002483 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002484 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002485 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002486 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002487 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002488 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002489 }
2490
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002491 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002492 key->bdaddr_type = addr_type;
2493 memcpy(key->val, tk, sizeof(key->val));
2494 key->authenticated = authenticated;
2495 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002496 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002497 key->enc_size = enc_size;
2498 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002499
Johan Hedbergca9142b2014-02-19 14:57:44 +02002500 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002501}
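
/* Illustrative sketch (editor's addition): how SMP key distribution might
 * record a received Long Term Key with this helper. Assumes hdev->lock is
 * held; SMP_LTK comes from smp.h, the example_* name is hypothetical.
 */
static void example_store_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                              u8 addr_type, u8 tk[16], u8 enc_size,
                              __le16 ediv, __le64 rand)
{
        struct smp_ltk *ltk;

        /* authenticated == 0x00 here means no MITM protection was used
         * during pairing.
         */
        ltk = hci_add_ltk(hdev, bdaddr, addr_type, SMP_LTK, 0x00, tk,
                          enc_size, ediv, rand);
        if (!ltk)
                BT_ERR("%s failed to store LTK for %pMR", hdev->name, bdaddr);
}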
2502
Johan Hedbergca9142b2014-02-19 14:57:44 +02002503struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2504 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002505{
2506 struct smp_irk *irk;
2507
2508 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2509 if (!irk) {
2510 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2511 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002512 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002513
2514 bacpy(&irk->bdaddr, bdaddr);
2515 irk->addr_type = addr_type;
2516
Johan Hedbergadae20c2014-11-13 14:37:48 +02002517 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002518 }
2519
2520 memcpy(irk->val, val, 16);
2521 bacpy(&irk->rpa, rpa);
2522
Johan Hedbergca9142b2014-02-19 14:57:44 +02002523 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002524}
2525
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002526int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2527{
2528 struct link_key *key;
2529
2530 key = hci_find_link_key(hdev, bdaddr);
2531 if (!key)
2532 return -ENOENT;
2533
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002534 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002535
Johan Hedberg0378b592014-11-19 15:22:22 +02002536 list_del_rcu(&key->list);
2537 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002538
2539 return 0;
2540}
2541
Johan Hedberge0b2b272014-02-18 17:14:31 +02002542int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002543{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002544 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002545 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002546
Johan Hedberg970d0f12014-11-13 14:37:47 +02002547 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002548 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002549 continue;
2550
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002551 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002552
Johan Hedberg970d0f12014-11-13 14:37:47 +02002553 list_del_rcu(&k->list);
2554 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002555 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002556 }
2557
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002558 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002559}
2560
Johan Hedberga7ec7332014-02-18 17:14:35 +02002561void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2562{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002563 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002564
Johan Hedbergadae20c2014-11-13 14:37:48 +02002565 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002566 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2567 continue;
2568
2569 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2570
Johan Hedbergadae20c2014-11-13 14:37:48 +02002571 list_del_rcu(&k->list);
2572 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002573 }
2574}
2575
Johan Hedberg55e76b32015-03-10 22:34:40 +02002576bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2577{
2578 struct smp_ltk *k;
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002579 struct smp_irk *irk;
Johan Hedberg55e76b32015-03-10 22:34:40 +02002580 u8 addr_type;
2581
2582 if (type == BDADDR_BREDR) {
2583 if (hci_find_link_key(hdev, bdaddr))
2584 return true;
2585 return false;
2586 }
2587
2588 /* Convert to HCI addr type which struct smp_ltk uses */
2589 if (type == BDADDR_LE_PUBLIC)
2590 addr_type = ADDR_LE_DEV_PUBLIC;
2591 else
2592 addr_type = ADDR_LE_DEV_RANDOM;
2593
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002594 irk = hci_get_irk(hdev, bdaddr, addr_type);
2595 if (irk) {
2596 bdaddr = &irk->bdaddr;
2597 addr_type = irk->addr_type;
2598 }
2599
Johan Hedberg55e76b32015-03-10 22:34:40 +02002600 rcu_read_lock();
2601 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg87c8b282015-03-11 08:55:51 +02002602 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2603 rcu_read_unlock();
Johan Hedberg55e76b32015-03-10 22:34:40 +02002604 return true;
Johan Hedberg87c8b282015-03-11 08:55:51 +02002605 }
Johan Hedberg55e76b32015-03-10 22:34:40 +02002606 }
2607 rcu_read_unlock();
2608
2609 return false;
2610}
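
/* Illustrative sketch (editor's addition): since hci_bdaddr_is_paired()
 * resolves RPAs through the IRK list, a caller may pass the address exactly
 * as seen on the air. Hypothetical pre-pairing check, hdev->lock held.
 */
static bool example_can_pair(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        /* Refuse to pair again with a device we already share keys with */
        return !hci_bdaddr_is_paired(hdev, bdaddr, type);
}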
2611
Ville Tervo6bd32322011-02-16 16:32:41 +02002612/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002613static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002614{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002615 struct hci_dev *hdev = container_of(work, struct hci_dev,
2616 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002617
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002618 if (hdev->sent_cmd) {
2619 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2620 u16 opcode = __le16_to_cpu(sent->opcode);
2621
2622 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2623 } else {
2624 BT_ERR("%s command tx timeout", hdev->name);
2625 }
2626
Ville Tervo6bd32322011-02-16 16:32:41 +02002627 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002628 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002629}
2630
Szymon Janc2763eda2011-03-22 13:12:22 +01002631struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002632 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002633{
2634 struct oob_data *data;
2635
Johan Hedberg6928a922014-10-26 20:46:09 +01002636 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2637 if (bacmp(bdaddr, &data->bdaddr) != 0)
2638 continue;
2639 if (data->bdaddr_type != bdaddr_type)
2640 continue;
2641 return data;
2642 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002643
2644 return NULL;
2645}
2646
Johan Hedberg6928a922014-10-26 20:46:09 +01002647int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2648 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002649{
2650 struct oob_data *data;
2651
Johan Hedberg6928a922014-10-26 20:46:09 +01002652 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002653 if (!data)
2654 return -ENOENT;
2655
Johan Hedberg6928a922014-10-26 20:46:09 +01002656 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002657
2658 list_del(&data->list);
2659 kfree(data);
2660
2661 return 0;
2662}
2663
Johan Hedberg35f74982014-02-18 17:14:32 +02002664void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002665{
2666 struct oob_data *data, *n;
2667
2668 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2669 list_del(&data->list);
2670 kfree(data);
2671 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002672}
2673
Marcel Holtmann07988722014-01-10 02:07:29 -08002674int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002675 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d52014-10-26 20:33:47 +01002676 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01002677{
2678 struct oob_data *data;
2679
Johan Hedberg6928a922014-10-26 20:46:09 +01002680 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002681 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002682 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002683 if (!data)
2684 return -ENOMEM;
2685
2686 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01002687 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01002688 list_add(&data->list, &hdev->remote_oob_data);
2689 }
2690
Johan Hedberg81328d52014-10-26 20:33:47 +01002691 if (hash192 && rand192) {
2692 memcpy(data->hash192, hash192, sizeof(data->hash192));
2693 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002694 if (hash256 && rand256)
2695 data->present = 0x03;
Johan Hedberg81328d52014-10-26 20:33:47 +01002696 } else {
2697 memset(data->hash192, 0, sizeof(data->hash192));
2698 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002699 if (hash256 && rand256)
2700 data->present = 0x02;
2701 else
2702 data->present = 0x00;
Marcel Holtmann07988722014-01-10 02:07:29 -08002703 }
2704
Johan Hedberg81328d52014-10-26 20:33:47 +01002705 if (hash256 && rand256) {
2706 memcpy(data->hash256, hash256, sizeof(data->hash256));
2707 memcpy(data->rand256, rand256, sizeof(data->rand256));
2708 } else {
2709 memset(data->hash256, 0, sizeof(data->hash256));
2710 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002711 if (hash192 && rand192)
2712 data->present = 0x01;
Johan Hedberg81328d52014-10-26 20:33:47 +01002713 }
Marcel Holtmann07988722014-01-10 02:07:29 -08002714
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002715 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002716
2717 return 0;
2718}
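
/* Editor's note (illustrative): as the logic above shows, data->present ends
 * up as 0x01 (P-192 values only), 0x02 (P-256 only), 0x03 (both) or 0x00
 * (neither). A hypothetical caller registering P-256-only OOB data simply
 * passes NULL for the P-192 pair; hdev->lock is assumed held.
 */
static int example_add_p256_oob(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                u8 *hash256, u8 *rand256)
{
        /* Results in data->present == 0x02 */
        return hci_add_remote_oob_data(hdev, bdaddr, BDADDR_BREDR,
                                       NULL, NULL, hash256, rand256);
}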
2719
Florian Grandeld2609b32015-06-18 03:16:34 +02002720/* This function requires the caller holds hdev->lock */
2721struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2722{
2723 struct adv_info *adv_instance;
2724
2725 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2726 if (adv_instance->instance == instance)
2727 return adv_instance;
2728 }
2729
2730 return NULL;
2731}
2732
2733/* This function requires the caller holds hdev->lock */
2734struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2735 struct adv_info *cur_instance;
2736
2737 cur_instance = hci_find_adv_instance(hdev, instance);
2738 if (!cur_instance)
2739 return NULL;
2740
2741 if (cur_instance == list_last_entry(&hdev->adv_instances,
2742 struct adv_info, list))
2743 return list_first_entry(&hdev->adv_instances,
2744 struct adv_info, list);
2745 else
2746 return list_next_entry(cur_instance, list);
2747}
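
/* Illustrative sketch (editor's addition): hci_get_next_instance() wraps
 * around at the end of the list, so repeatedly advancing cycles through all
 * advertising instances. Hypothetical rotation helper, hdev->lock held.
 */
static u8 example_next_adv_instance(struct hci_dev *hdev, u8 cur)
{
        struct adv_info *next;

        next = hci_get_next_instance(hdev, cur);
        if (!next)
                return 0x00;    /* cur is not a known instance */

        return next->instance;
}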
2748
2749/* This function requires the caller holds hdev->lock */
2750int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2751{
2752 struct adv_info *adv_instance;
2753
2754 adv_instance = hci_find_adv_instance(hdev, instance);
2755 if (!adv_instance)
2756 return -ENOENT;
2757
2758 BT_DBG("%s removing %dMR", hdev->name, instance);
2759
Florian Grandel5d900e42015-06-18 03:16:35 +02002760 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2761 cancel_delayed_work(&hdev->adv_instance_expire);
2762 hdev->adv_instance_timeout = 0;
2763 }
2764
Florian Grandeld2609b32015-06-18 03:16:34 +02002765 list_del(&adv_instance->list);
2766 kfree(adv_instance);
2767
2768 hdev->adv_instance_cnt--;
2769
2770 return 0;
2771}
2772
2773/* This function requires the caller holds hdev->lock */
2774void hci_adv_instances_clear(struct hci_dev *hdev)
2775{
2776 struct adv_info *adv_instance, *n;
2777
Florian Grandel5d900e42015-06-18 03:16:35 +02002778 if (hdev->adv_instance_timeout) {
2779 cancel_delayed_work(&hdev->adv_instance_expire);
2780 hdev->adv_instance_timeout = 0;
2781 }
2782
Florian Grandeld2609b32015-06-18 03:16:34 +02002783 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2784 list_del(&adv_instance->list);
2785 kfree(adv_instance);
2786 }
2787
2788 hdev->adv_instance_cnt = 0;
2789}
2790
2791/* This function requires the caller holds hdev->lock */
2792int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2793 u16 adv_data_len, u8 *adv_data,
2794 u16 scan_rsp_len, u8 *scan_rsp_data,
2795 u16 timeout, u16 duration)
2796{
2797 struct adv_info *adv_instance;
2798
2799 adv_instance = hci_find_adv_instance(hdev, instance);
2800 if (adv_instance) {
2801 memset(adv_instance->adv_data, 0,
2802 sizeof(adv_instance->adv_data));
2803 memset(adv_instance->scan_rsp_data, 0,
2804 sizeof(adv_instance->scan_rsp_data));
2805 } else {
2806 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2807 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2808 return -EOVERFLOW;
2809
Johan Hedberg39ecfad2015-06-18 20:50:08 +03002810 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
Florian Grandeld2609b32015-06-18 03:16:34 +02002811 if (!adv_instance)
2812 return -ENOMEM;
2813
Florian Grandelfffd38b2015-06-18 03:16:47 +02002814 adv_instance->pending = true;
Florian Grandeld2609b32015-06-18 03:16:34 +02002815 adv_instance->instance = instance;
2816 list_add(&adv_instance->list, &hdev->adv_instances);
2817 hdev->adv_instance_cnt++;
2818 }
2819
2820 adv_instance->flags = flags;
2821 adv_instance->adv_data_len = adv_data_len;
2822 adv_instance->scan_rsp_len = scan_rsp_len;
2823
2824 if (adv_data_len)
2825 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2826
2827 if (scan_rsp_len)
2828 memcpy(adv_instance->scan_rsp_data,
2829 scan_rsp_data, scan_rsp_len);
2830
2831 adv_instance->timeout = timeout;
Florian Grandel5d900e42015-06-18 03:16:35 +02002832 adv_instance->remaining_time = timeout;
Florian Grandeld2609b32015-06-18 03:16:34 +02002833
2834 if (duration == 0)
2835 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2836 else
2837 adv_instance->duration = duration;
2838
2839 BT_DBG("%s for %dMR", hdev->name, instance);
2840
2841 return 0;
2842}
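
/* Illustrative sketch (editor's addition): registering advertising instance
 * 0x01 with a minimal flags-only AD payload, no scan response, no timeout and
 * the default duration (duration == 0 selects HCI_DEFAULT_ADV_DURATION
 * above). Hypothetical caller, hdev->lock held.
 */
static int example_register_adv_instance(struct hci_dev *hdev)
{
        /* AD Flags: LE General Discoverable, BR/EDR not supported */
        u8 adv_data[] = { 0x02, 0x01, 0x06 };

        return hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
                                    0, NULL, 0, 0);
}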
2843
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002844struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002845 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002846{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002847 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002848
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002849 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002850 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002851 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002852 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002853
2854 return NULL;
2855}
2856
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002857void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002858{
2859 struct list_head *p, *n;
2860
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002861 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002862 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002863
2864 list_del(p);
2865 kfree(b);
2866 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002867}
2868
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002869int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002870{
2871 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002872
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002873 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002874 return -EBADF;
2875
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002876 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002877 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002878
Johan Hedberg27f70f32014-07-21 10:50:06 +03002879 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002880 if (!entry)
2881 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002882
2883 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002884 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002885
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002886 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002887
2888 return 0;
2889}
2890
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002891int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002892{
2893 struct bdaddr_list *entry;
2894
Johan Hedberg35f74982014-02-18 17:14:32 +02002895 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002896 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002897 return 0;
2898 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002899
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002900 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002901 if (!entry)
2902 return -ENOENT;
2903
2904 list_del(&entry->list);
2905 kfree(entry);
2906
2907 return 0;
2908}
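
/* Illustrative sketch (editor's addition): the bdaddr_list helpers above back
 * hdev->blacklist, hdev->whitelist and hdev->le_white_list alike. Note that
 * deleting BDADDR_ANY clears the whole list. Hypothetical toggle helper,
 * hdev->lock held.
 */
static int example_toggle_whitelist(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                    u8 type, bool add)
{
        if (add)
                return hci_bdaddr_list_add(&hdev->whitelist, bdaddr, type);

        return hci_bdaddr_list_del(&hdev->whitelist, bdaddr, type);
}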
2909
Andre Guedes15819a72014-02-03 13:56:18 -03002910/* This function requires the caller holds hdev->lock */
2911struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2912 bdaddr_t *addr, u8 addr_type)
2913{
2914 struct hci_conn_params *params;
2915
2916 list_for_each_entry(params, &hdev->le_conn_params, list) {
2917 if (bacmp(&params->addr, addr) == 0 &&
2918 params->addr_type == addr_type) {
2919 return params;
2920 }
2921 }
2922
2923 return NULL;
2924}
2925
2926/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002927struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2928 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002929{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002930 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002931
Johan Hedberg501f8822014-07-04 12:37:26 +03002932 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002933 if (bacmp(&param->addr, addr) == 0 &&
2934 param->addr_type == addr_type)
2935 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002936 }
2937
2938 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002939}
2940
2941/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002942struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2943 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002944{
2945 struct hci_conn_params *params;
2946
2947 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002948 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002949 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002950
2951 params = kzalloc(sizeof(*params), GFP_KERNEL);
2952 if (!params) {
2953 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002954 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002955 }
2956
2957 bacpy(&params->addr, addr);
2958 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002959
2960 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002961 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002962
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002963 params->conn_min_interval = hdev->le_conn_min_interval;
2964 params->conn_max_interval = hdev->le_conn_max_interval;
2965 params->conn_latency = hdev->le_conn_latency;
2966 params->supervision_timeout = hdev->le_supv_timeout;
2967 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2968
2969 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2970
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002971 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002972}
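
/* Illustrative sketch (editor's addition): enabling autoconnect for an LE
 * device by creating (or reusing) its connection parameters. Real callers
 * additionally move params->action onto hdev->pend_le_conns and kick the
 * background scan; that is elided here. Hypothetical helper, hdev->lock held.
 */
static int example_enable_autoconnect(struct hci_dev *hdev, bdaddr_t *addr,
                                      u8 addr_type)
{
        struct hci_conn_params *params;

        params = hci_conn_params_add(hdev, addr, addr_type);
        if (!params)
                return -ENOMEM;

        params->auto_connect = HCI_AUTO_CONN_ALWAYS;

        return 0;
}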
2973
Johan Hedbergf6c63242014-08-15 21:06:59 +03002974static void hci_conn_params_free(struct hci_conn_params *params)
2975{
2976 if (params->conn) {
2977 hci_conn_drop(params->conn);
2978 hci_conn_put(params->conn);
2979 }
2980
2981 list_del(&params->action);
2982 list_del(&params->list);
2983 kfree(params);
2984}
2985
Andre Guedes15819a72014-02-03 13:56:18 -03002986/* This function requires the caller holds hdev->lock */
2987void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2988{
2989 struct hci_conn_params *params;
2990
2991 params = hci_conn_params_lookup(hdev, addr, addr_type);
2992 if (!params)
2993 return;
2994
Johan Hedbergf6c63242014-08-15 21:06:59 +03002995 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002996
Johan Hedberg95305ba2014-07-04 12:37:21 +03002997 hci_update_background_scan(hdev);
2998
Andre Guedes15819a72014-02-03 13:56:18 -03002999 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3000}
3001
3002/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003003void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003004{
3005 struct hci_conn_params *params, *tmp;
3006
3007 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03003008 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3009 continue;
Jakub Pawlowskif75113a2015-08-07 20:22:53 +02003010
3011		/* If trying to establish a one-time connection to a disabled
3012		 * device, leave the params but mark them as explicit-connect only.
3013 */
3014 if (params->explicit_connect) {
3015 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3016 continue;
3017 }
3018
Andre Guedes15819a72014-02-03 13:56:18 -03003019 list_del(&params->list);
3020 kfree(params);
3021 }
3022
Johan Hedberg55af49a2014-07-02 17:37:26 +03003023	BT_DBG("All disabled LE connection parameters were removed");
3024}
3025
3026/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003027void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003028{
3029 struct hci_conn_params *params, *tmp;
3030
Johan Hedbergf6c63242014-08-15 21:06:59 +03003031 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3032 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003033
Johan Hedberga2f41a82014-07-04 12:37:19 +03003034 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003035
Andre Guedes15819a72014-02-03 13:56:18 -03003036 BT_DBG("All LE connection parameters were removed");
3037}
3038
Marcel Holtmann1904a852015-01-11 13:50:44 -08003039static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003040{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003041 if (status) {
3042 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003043
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003044 hci_dev_lock(hdev);
3045 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3046 hci_dev_unlock(hdev);
3047 return;
3048 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003049}
3050
Marcel Holtmann1904a852015-01-11 13:50:44 -08003051static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3052 u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003053{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003054 /* General inquiry access code (GIAC) */
3055 u8 lap[3] = { 0x33, 0x8b, 0x9e };
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003056 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003057 int err;
3058
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003059 if (status) {
3060 BT_ERR("Failed to disable LE scanning: status %d", status);
3061 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003062 }
3063
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003064 hdev->discovery.scan_start = 0;
3065
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003066 switch (hdev->discovery.type) {
3067 case DISCOV_TYPE_LE:
3068 hci_dev_lock(hdev);
3069 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3070 hci_dev_unlock(hdev);
3071 break;
3072
3073 case DISCOV_TYPE_INTERLEAVED:
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003074 hci_dev_lock(hdev);
3075
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003076 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3077 &hdev->quirks)) {
3078			/* If we were running an LE-only scan, change the discovery
3079 * state. If we were running both LE and BR/EDR inquiry
3080 * simultaneously, and BR/EDR inquiry is already
3081 * finished, stop discovery, otherwise BR/EDR inquiry
Wesley Kuo177d0502015-05-13 10:33:15 +08003082			 * will stop discovery when finished. If we are resolving a
3083			 * remote device name, do not change the discovery state.
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003084 */
Wesley Kuo177d0502015-05-13 10:33:15 +08003085 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3086 hdev->discovery.state != DISCOVERY_RESOLVING)
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003087 hci_discovery_set_state(hdev,
3088 DISCOVERY_STOPPED);
3089 } else {
Johan Hedbergbaf880a2015-03-21 08:02:23 +02003090 struct hci_request req;
3091
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003092 hci_inquiry_cache_flush(hdev);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003093
Johan Hedbergbaf880a2015-03-21 08:02:23 +02003094 hci_req_init(&req, hdev);
3095
3096 memset(&cp, 0, sizeof(cp));
3097 memcpy(&cp.lap, lap, sizeof(cp.lap));
3098 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3099 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3100
Jakub Pawlowski07d23342015-03-17 09:04:14 -07003101 err = hci_req_run(&req, inquiry_complete);
3102 if (err) {
3103 BT_ERR("Inquiry request failed: err %d", err);
3104 hci_discovery_set_state(hdev,
3105 DISCOVERY_STOPPED);
3106 }
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003107 }
3108
3109 hci_dev_unlock(hdev);
3110 break;
3111 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003112}
3113
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003114static void le_scan_disable_work(struct work_struct *work)
3115{
3116 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003117 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003118 struct hci_request req;
3119 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003120
3121 BT_DBG("%s", hdev->name);
3122
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003123 cancel_delayed_work_sync(&hdev->le_scan_restart);
3124
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003125 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003126
Andre Guedesb1efcc22014-02-26 20:21:40 -03003127 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003128
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003129 err = hci_req_run(&req, le_scan_disable_work_complete);
3130 if (err)
3131 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003132}
3133
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003134static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3135 u16 opcode)
3136{
3137 unsigned long timeout, duration, scan_start, now;
3138
3139 BT_DBG("%s", hdev->name);
3140
3141 if (status) {
3142 BT_ERR("Failed to restart LE scan: status %d", status);
3143 return;
3144 }
3145
3146 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3147 !hdev->discovery.scan_start)
3148 return;
3149
3150	/* When the scan was started, hdev->le_scan_disable was queued to
3151	 * run 'duration' after scan_start. During scan restart that job was
3152	 * canceled, so we need to queue it again with the remaining timeout
3153	 * to make sure the scan does not run indefinitely.
3154 */
3155 duration = hdev->discovery.scan_duration;
3156 scan_start = hdev->discovery.scan_start;
3157 now = jiffies;
3158 if (now - scan_start <= duration) {
3159 int elapsed;
3160
3161 if (now >= scan_start)
3162 elapsed = now - scan_start;
3163 else
3164 elapsed = ULONG_MAX - scan_start + now;
3165
3166 timeout = duration - elapsed;
3167 } else {
3168 timeout = 0;
3169 }
3170 queue_delayed_work(hdev->workqueue,
3171 &hdev->le_scan_disable, timeout);
3172}
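
/* Editor's distillation (illustrative) of the timeout arithmetic above: with
 * duration = 10240 and now = scan_start + 4000, the disable job is re-queued
 * after 6240 jiffies. The else branch mirrors the original's handling of a
 * jiffies wraparound between scan_start and now.
 */
static unsigned long example_remaining_scan_timeout(unsigned long scan_start,
                                                    unsigned long duration,
                                                    unsigned long now)
{
        unsigned long elapsed;

        if (now - scan_start > duration)
                return 0;

        if (now >= scan_start)
                elapsed = now - scan_start;
        else
                elapsed = ULONG_MAX - scan_start + now;

        return duration - elapsed;
}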
3173
3174static void le_scan_restart_work(struct work_struct *work)
3175{
3176 struct hci_dev *hdev = container_of(work, struct hci_dev,
3177 le_scan_restart.work);
3178 struct hci_request req;
3179 struct hci_cp_le_set_scan_enable cp;
3180 int err;
3181
3182 BT_DBG("%s", hdev->name);
3183
3184	/* If the controller is not scanning, we are done. */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003185 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003186 return;
3187
3188 hci_req_init(&req, hdev);
3189
3190 hci_req_add_le_scan_disable(&req);
3191
3192 memset(&cp, 0, sizeof(cp));
3193 cp.enable = LE_SCAN_ENABLE;
3194 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3195 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3196
3197 err = hci_req_run(&req, le_scan_restart_work_complete);
3198 if (err)
3199 BT_ERR("Restart LE scan request failed: err %d", err);
3200}
3201
Johan Hedberga1f4c312014-02-27 14:05:41 +02003202/* Copy the Identity Address of the controller.
3203 *
3204 * If the controller has a public BD_ADDR, then by default use that one.
3205 * If this is an LE-only controller without a public address, default to
3206 * the static random address.
3207 *
3208 * For debugging purposes it is possible to force controllers with a
3209 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003210 *
3211 * In case BR/EDR has been disabled on a dual-mode controller and
3212 * userspace has configured a static address, then that address
3213 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02003214 */
3215void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3216 u8 *bdaddr_type)
3217{
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07003218 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003219 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003220 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01003221 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02003222 bacpy(bdaddr, &hdev->static_addr);
3223 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3224 } else {
3225 bacpy(bdaddr, &hdev->bdaddr);
3226 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3227 }
3228}
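
/* Illustrative sketch (editor's addition): typical use of the helper above
 * when a caller needs the local identity address, e.g. for logging or for
 * filling in mgmt events. Hypothetical function name.
 */
static void example_log_identity(struct hci_dev *hdev)
{
        bdaddr_t bdaddr;
        u8 bdaddr_type;

        hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);

        BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr, bdaddr_type);
}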
3229
David Herrmann9be0dab2012-04-22 14:39:57 +02003230/* Alloc HCI device */
3231struct hci_dev *hci_alloc_dev(void)
3232{
3233 struct hci_dev *hdev;
3234
Johan Hedberg27f70f32014-07-21 10:50:06 +03003235 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003236 if (!hdev)
3237 return NULL;
3238
David Herrmannb1b813d2012-04-22 14:39:58 +02003239 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3240 hdev->esco_type = (ESCO_HV1);
3241 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003242 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3243 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003244 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003245 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3246 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
Florian Grandeld2609b32015-06-18 03:16:34 +02003247 hdev->adv_instance_cnt = 0;
3248 hdev->cur_adv_instance = 0x00;
Florian Grandel5d900e42015-06-18 03:16:35 +02003249 hdev->adv_instance_timeout = 0;
David Herrmannb1b813d2012-04-22 14:39:58 +02003250
David Herrmannb1b813d2012-04-22 14:39:58 +02003251 hdev->sniff_max_interval = 800;
3252 hdev->sniff_min_interval = 80;
3253
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003254 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02003255 hdev->le_adv_min_interval = 0x0800;
3256 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003257 hdev->le_scan_interval = 0x0060;
3258 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003259 hdev->le_conn_min_interval = 0x0028;
3260 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003261 hdev->le_conn_latency = 0x0000;
3262 hdev->le_supv_timeout = 0x002a;
Marcel Holtmanna8e1bfa2014-12-20 16:28:40 +01003263 hdev->le_def_tx_len = 0x001b;
3264 hdev->le_def_tx_time = 0x0148;
3265 hdev->le_max_tx_len = 0x001b;
3266 hdev->le_max_tx_time = 0x0148;
3267 hdev->le_max_rx_len = 0x001b;
3268 hdev->le_max_rx_time = 0x0148;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003269
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003270 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003271 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003272 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3273 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003274
David Herrmannb1b813d2012-04-22 14:39:58 +02003275 mutex_init(&hdev->lock);
3276 mutex_init(&hdev->req_lock);
3277
3278 INIT_LIST_HEAD(&hdev->mgmt_pending);
3279 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003280 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003281 INIT_LIST_HEAD(&hdev->uuids);
3282 INIT_LIST_HEAD(&hdev->link_keys);
3283 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003284 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003285 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003286 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003287 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003288 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003289 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003290 INIT_LIST_HEAD(&hdev->conn_hash.list);
Florian Grandeld2609b32015-06-18 03:16:34 +02003291 INIT_LIST_HEAD(&hdev->adv_instances);
David Herrmannb1b813d2012-04-22 14:39:58 +02003292
3293 INIT_WORK(&hdev->rx_work, hci_rx_work);
3294 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3295 INIT_WORK(&hdev->tx_work, hci_tx_work);
3296 INIT_WORK(&hdev->power_on, hci_power_on);
Marcel Holtmannc7741d12015-01-28 11:09:55 -08003297 INIT_WORK(&hdev->error_reset, hci_error_reset);
David Herrmannb1b813d2012-04-22 14:39:58 +02003298
David Herrmannb1b813d2012-04-22 14:39:58 +02003299 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3300 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3301 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003302 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Florian Grandel5d900e42015-06-18 03:16:35 +02003303 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
David Herrmannb1b813d2012-04-22 14:39:58 +02003304
David Herrmannb1b813d2012-04-22 14:39:58 +02003305 skb_queue_head_init(&hdev->rx_q);
3306 skb_queue_head_init(&hdev->cmd_q);
3307 skb_queue_head_init(&hdev->raw_q);
3308
3309 init_waitqueue_head(&hdev->req_wait_q);
3310
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003311 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003312
David Herrmannb1b813d2012-04-22 14:39:58 +02003313 hci_init_sysfs(hdev);
3314 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003315
3316 return hdev;
3317}
3318EXPORT_SYMBOL(hci_alloc_dev);
3319
3320/* Free HCI device */
3321void hci_free_dev(struct hci_dev *hdev)
3322{
David Herrmann9be0dab2012-04-22 14:39:57 +02003323 /* will free via device release */
3324 put_device(&hdev->dev);
3325}
3326EXPORT_SYMBOL(hci_free_dev);
3327
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328/* Register HCI device */
3329int hci_register_dev(struct hci_dev *hdev)
3330{
David Herrmannb1b813d2012-04-22 14:39:58 +02003331 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332
Marcel Holtmann74292d52014-07-06 15:50:27 +02003333 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 return -EINVAL;
3335
Mat Martineau08add512011-11-02 16:18:36 -07003336 /* Do not allow HCI_AMP devices to register at index 0,
3337 * so the index can be used as the AMP controller ID.
3338 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003339 switch (hdev->dev_type) {
3340 case HCI_BREDR:
3341 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3342 break;
3343 case HCI_AMP:
3344 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3345 break;
3346 default:
3347 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003349
Sasha Levin3df92b32012-05-27 22:36:56 +02003350 if (id < 0)
3351 return id;
3352
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353 sprintf(hdev->name, "hci%d", id);
3354 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003355
3356 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3357
Kees Cookd8537542013-07-03 15:04:57 -07003358 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3359 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003360 if (!hdev->workqueue) {
3361 error = -ENOMEM;
3362 goto err;
3363 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003364
Kees Cookd8537542013-07-03 15:04:57 -07003365 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3366 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003367 if (!hdev->req_workqueue) {
3368 destroy_workqueue(hdev->workqueue);
3369 error = -ENOMEM;
3370 goto err;
3371 }
3372
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003373 if (!IS_ERR_OR_NULL(bt_debugfs))
3374 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3375
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003376 dev_set_name(&hdev->dev, "%s", hdev->name);
3377
3378 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003379 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003380 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003382 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003383 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3384 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003385 if (hdev->rfkill) {
3386 if (rfkill_register(hdev->rfkill) < 0) {
3387 rfkill_destroy(hdev->rfkill);
3388 hdev->rfkill = NULL;
3389 }
3390 }
3391
Johan Hedberg5e130362013-09-13 08:58:17 +03003392 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003393 hci_dev_set_flag(hdev, HCI_RFKILLED);
Johan Hedberg5e130362013-09-13 08:58:17 +03003394
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003395 hci_dev_set_flag(hdev, HCI_SETUP);
3396 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003397
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003398 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003399 /* Assume BR/EDR support until proven otherwise (such as
3400 * through reading supported features during init.
3401 */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003402 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg56f87902013-10-02 13:43:13 +03003403 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003404
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003405 write_lock(&hci_dev_list_lock);
3406 list_add(&hdev->list, &hci_dev_list);
3407 write_unlock(&hci_dev_list_lock);
3408
Marcel Holtmann4a964402014-07-02 19:10:33 +02003409 /* Devices that are marked for raw-only usage are unconfigured
3410 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003411 */
3412 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003413 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003414
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003415 hci_sock_dev_event(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003416 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417
Johan Hedberg19202572013-01-14 22:33:51 +02003418 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003419
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003421
David Herrmann33ca9542011-10-08 14:58:49 +02003422err_wqueue:
3423 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003424 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003425err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003426 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003427
David Herrmann33ca9542011-10-08 14:58:49 +02003428 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429}
3430EXPORT_SYMBOL(hci_register_dev);
3431
3432/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003433void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434{
Marcel Holtmann2d7cc192015-04-04 21:59:27 -07003435 int id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003436
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003437 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003439 hci_dev_set_flag(hdev, HCI_UNREGISTER);
Johan Hovold94324962012-03-15 14:48:41 +01003440
Sasha Levin3df92b32012-05-27 22:36:56 +02003441 id = hdev->id;
3442
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003443 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003445 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446
3447 hci_dev_do_close(hdev);
3448
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003449 cancel_work_sync(&hdev->power_on);
3450
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003451 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003452 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3453 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003454 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003455 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003456 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003457 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003458
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003459 /* mgmt_index_removed should take care of emptying the
3460 * pending list */
3461 BUG_ON(!list_empty(&hdev->mgmt_pending));
3462
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003463 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003465 if (hdev->rfkill) {
3466 rfkill_unregister(hdev->rfkill);
3467 rfkill_destroy(hdev->rfkill);
3468 }
3469
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003470 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003471
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003472 debugfs_remove_recursive(hdev->debugfs);
3473
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003474 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003475 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003476
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003477 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003478 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003479 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003480 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003481 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003482 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003483 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003484 hci_remote_oob_data_clear(hdev);
Florian Grandeld2609b32015-06-18 03:16:34 +02003485 hci_adv_instances_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003486 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003487 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003488 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003489 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003490
David Herrmanndc946bd2012-01-07 15:47:24 +01003491 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003492
3493 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003494}
3495EXPORT_SYMBOL(hci_unregister_dev);
3496
3497/* Suspend HCI device */
3498int hci_suspend_dev(struct hci_dev *hdev)
3499{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003500 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501 return 0;
3502}
3503EXPORT_SYMBOL(hci_suspend_dev);
3504
3505/* Resume HCI device */
3506int hci_resume_dev(struct hci_dev *hdev)
3507{
Marcel Holtmann05fcd4c2015-10-25 23:29:22 +01003508 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509 return 0;
3510}
3511EXPORT_SYMBOL(hci_resume_dev);
3512
Marcel Holtmann75e05692014-11-02 08:15:38 +01003513/* Reset HCI device */
3514int hci_reset_dev(struct hci_dev *hdev)
3515{
3516 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3517 struct sk_buff *skb;
3518
3519 skb = bt_skb_alloc(3, GFP_ATOMIC);
3520 if (!skb)
3521 return -ENOMEM;
3522
3523 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3524 memcpy(skb_put(skb, 3), hw_err, 3);
3525
3526 /* Send Hardware Error to upper stack */
3527 return hci_recv_frame(hdev, skb);
3528}
3529EXPORT_SYMBOL(hci_reset_dev);
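
/* Illustrative sketch (editor's addition): a transport driver that detects an
 * unrecoverable firmware state can inject the synthetic Hardware Error event
 * via hci_reset_dev() and let the core recover. Hypothetical driver callback.
 */
static void example_driver_fatal_error(struct hci_dev *hdev)
{
        int err;

        err = hci_reset_dev(hdev);
        if (err)
                BT_ERR("%s failed to inject hardware error (%d)",
                       hdev->name, err);
}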
3530
Marcel Holtmann76bca882009-11-18 00:40:39 +01003531/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003532int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003533{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003534 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003535 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003536 kfree_skb(skb);
3537 return -ENXIO;
3538 }
3539
Marcel Holtmannfe806dc2015-10-08 03:14:28 +02003540 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3541 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3542 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3543 kfree_skb(skb);
3544 return -EINVAL;
3545 }
3546
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003547 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003548 bt_cb(skb)->incoming = 1;
3549
3550 /* Time stamp */
3551 __net_timestamp(skb);
3552
Marcel Holtmann76bca882009-11-18 00:40:39 +01003553 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003554 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003555
Marcel Holtmann76bca882009-11-18 00:40:39 +01003556 return 0;
3557}
3558EXPORT_SYMBOL(hci_recv_frame);
3559
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003560/* Receive diagnostic message from HCI drivers */
3561int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3562{
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003563 /* Mark as diagnostic packet */
3564 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3565
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003566 /* Time stamp */
3567 __net_timestamp(skb);
3568
Marcel Holtmann581d6fd2015-10-09 16:13:51 +02003569 skb_queue_tail(&hdev->rx_q, skb);
3570 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003571
Marcel Holtmanne875ff82015-10-07 16:38:35 +02003572 return 0;
3573}
3574EXPORT_SYMBOL(hci_recv_diag);
3575
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576/* ---- Interface to upper protocols ---- */
3577
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578int hci_register_cb(struct hci_cb *cb)
3579{
3580 BT_DBG("%p name %s", cb, cb->name);
3581
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003582 mutex_lock(&hci_cb_list_lock);
Johan Hedberg00629e02015-02-18 14:53:54 +02003583 list_add_tail(&cb->list, &hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003584 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585
3586 return 0;
3587}
3588EXPORT_SYMBOL(hci_register_cb);
3589
3590int hci_unregister_cb(struct hci_cb *cb)
3591{
3592 BT_DBG("%p name %s", cb, cb->name);
3593
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003594 mutex_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595 list_del(&cb->list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003596 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597
3598 return 0;
3599}
3600EXPORT_SYMBOL(hci_unregister_cb);
3601
Marcel Holtmann51086992013-10-10 14:54:19 -07003602static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003604 int err;
3605
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003606 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003608 /* Time stamp */
3609 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003611 /* Send copy to monitor */
3612 hci_send_to_monitor(hdev, skb);
3613
3614 if (atomic_read(&hdev->promisc)) {
3615 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003616 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617 }
3618
3619 /* Get rid of skb owner, prior to sending to the driver. */
3620 skb_orphan(skb);
3621
Marcel Holtmann73d0d3c2015-10-04 23:34:01 +02003622 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3623 kfree_skb(skb);
3624 return;
3625 }
3626
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003627 err = hdev->send(hdev, skb);
3628 if (err < 0) {
3629 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3630 kfree_skb(skb);
3631 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632}
3633
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003634/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003635int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3636 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003637{
3638 struct sk_buff *skb;
3639
3640 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3641
3642 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3643 if (!skb) {
3644 BT_ERR("%s no memory for command", hdev->name);
3645 return -ENOMEM;
3646 }
3647
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003648 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003649 * single-command requests.
3650 */
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01003651 bt_cb(skb)->hci.req_start = true;
Johan Hedberg11714b32013-03-05 20:37:47 +02003652
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003654 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003655
3656 return 0;
3657}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658
3659/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003660void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003661{
3662 struct hci_command_hdr *hdr;
3663
3664 if (!hdev->sent_cmd)
3665 return NULL;
3666
3667 hdr = (void *) hdev->sent_cmd->data;
3668
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003669 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 return NULL;
3671
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003672 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003673
3674 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3675}
3676
Loic Poulainfbef1682015-09-29 15:05:44 +02003677/* Send HCI command and wait for command complete event */
3678struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3679 const void *param, u32 timeout)
3680{
3681 struct sk_buff *skb;
3682
3683 if (!test_bit(HCI_UP, &hdev->flags))
3684 return ERR_PTR(-ENETDOWN);
3685
3686 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3687
3688 hci_req_lock(hdev);
3689 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3690 hci_req_unlock(hdev);
3691
3692 return skb;
3693}
3694EXPORT_SYMBOL(hci_cmd_sync);
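
/* Illustrative sketch (editor's addition): issuing a synchronous command from
 * driver setup code, e.g. reading the local version. HCI_OP_READ_LOCAL_VERSION
 * and HCI_INIT_TIMEOUT exist in the tree; the surrounding function is
 * hypothetical.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                           HCI_INIT_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* The Command Complete parameters follow in skb->data */
        kfree_skb(skb);

        return 0;
}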
3695
Linus Torvalds1da177e2005-04-16 15:20:36 -07003696/* Send ACL data */
3697static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3698{
3699 struct hci_acl_hdr *hdr;
3700 int len = skb->len;
3701
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003702 skb_push(skb, HCI_ACL_HDR_SIZE);
3703 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003704 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003705 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3706 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707}
3708
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003709static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003710 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003712 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713 struct hci_dev *hdev = conn->hdev;
3714 struct sk_buff *list;
3715
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003716 skb->len = skb_headlen(skb);
3717 skb->data_len = 0;
3718
3719 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003720
3721 switch (hdev->dev_type) {
3722 case HCI_BREDR:
3723 hci_add_acl_hdr(skb, conn->handle, flags);
3724 break;
3725 case HCI_AMP:
3726 hci_add_acl_hdr(skb, chan->handle, flags);
3727 break;
3728 default:
3729 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3730 return;
3731 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003732
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003733 list = skb_shinfo(skb)->frag_list;
3734 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735 /* Non fragmented */
3736 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3737
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003738 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739 } else {
3740 /* Fragmented */
3741 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3742
3743 skb_shinfo(skb)->frag_list = NULL;
3744
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003745 /* Queue all fragments atomically. We need to use spin_lock_bh
3746 * here because with 6LoWPAN links this function can be called
3747 * from softirq context, and taking a normal spin lock there
3748 * could cause deadlocks.
3749 */
3750 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003752 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003753
3754 flags &= ~ACL_START;
3755 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003756 do {
3757 skb = list;
 list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003758
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003759 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003760 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761
3762 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3763
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003764 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 } while (list);
3766
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003767 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003769}
3770
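/* Note on the fragmentation above: an L2CAP PDU larger than the
 * controller's ACL MTU arrives here as a head skb plus a
 * frag_list. Only the head keeps the caller's boundary flag
 * (typically ACL_START); every fragment behind it is re-flagged
 * ACL_CONT before queueing so the controller can reassemble the
 * PDU in order.
 */
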
3771void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3772{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003773 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003774
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003775 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003776
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003777 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003779 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781
3782/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003783void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784{
3785 struct hci_dev *hdev = conn->hdev;
3786 struct hci_sco_hdr hdr;
3787
3788 BT_DBG("%s len %d", hdev->name, skb->len);
3789
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003790 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791 hdr.dlen = skb->len;
3792
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003793 skb_push(skb, HCI_SCO_HDR_SIZE);
3794 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003795 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003797 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003798
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003800 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802
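/* Note on the SCO header above: it carries a 2-byte connection
 * handle and a single length byte, so hdr.dlen silently truncates
 * skb->len to 8 bits. SCO frames handed in here are expected to
 * already respect the controller's SCO MTU (at most 255 bytes of
 * payload).
 */
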
3803/* ---- HCI TX task (outgoing data) ---- */
3804
3805/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003806static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3807 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808{
3809 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003810 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003811 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003813 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003815
3816 rcu_read_lock();
3817
3818 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003819 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003821
3822 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3823 continue;
3824
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 num++;
3826
3827 if (c->sent < min) {
3828 min = c->sent;
3829 conn = c;
3830 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003831
3832 if (hci_conn_num(hdev, type) == num)
3833 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834 }
3835
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003836 rcu_read_unlock();
3837
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003839 int cnt, q;
3840
3841 switch (conn->type) {
3842 case ACL_LINK:
3843 cnt = hdev->acl_cnt;
3844 break;
3845 case SCO_LINK:
3846 case ESCO_LINK:
3847 cnt = hdev->sco_cnt;
3848 break;
3849 case LE_LINK:
3850 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3851 break;
3852 default:
3853 cnt = 0;
3854 BT_ERR("Unknown link type");
3855 }
3856
3857 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 *quote = q ? q : 1;
3859 } else
3860 *quote = 0;
3861
3862 BT_DBG("conn %p quote %d", conn, *quote);
3863 return conn;
3864}
3865
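/* Worked example for the quota above: with three ACL connections
 * queuing data and hdev->acl_cnt == 7 free controller buffers,
 * the connection with the smallest ->sent count wins and gets
 * *quote = 7 / 3 = 2 packets this round; when cnt < num the
 * quota is clamped to 1 so the winner still makes progress.
 */
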
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003866static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867{
3868 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003869 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870
Ville Tervobae1f5d92011-02-10 22:38:53 -03003871 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003873 rcu_read_lock();
3874
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003876 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003877 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003878 BT_ERR("%s killing stalled connection %pMR",
3879 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003880 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 }
3882 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003883
3884 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885}
3886
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003887static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3888 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003889{
3890 struct hci_conn_hash *h = &hdev->conn_hash;
3891 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003892 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003893 struct hci_conn *conn;
3894 int cnt, q, conn_num = 0;
3895
3896 BT_DBG("%s", hdev->name);
3897
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003898 rcu_read_lock();
3899
3900 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003901 struct hci_chan *tmp;
3902
3903 if (conn->type != type)
3904 continue;
3905
3906 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3907 continue;
3908
3909 conn_num++;
3910
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003911 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003912 struct sk_buff *skb;
3913
3914 if (skb_queue_empty(&tmp->data_q))
3915 continue;
3916
3917 skb = skb_peek(&tmp->data_q);
3918 if (skb->priority < cur_prio)
3919 continue;
3920
3921 if (skb->priority > cur_prio) {
3922 num = 0;
3923 min = ~0;
3924 cur_prio = skb->priority;
3925 }
3926
3927 num++;
3928
3929 if (conn->sent < min) {
3930 min = conn->sent;
3931 chan = tmp;
3932 }
3933 }
3934
3935 if (hci_conn_num(hdev, type) == conn_num)
3936 break;
3937 }
3938
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003939 rcu_read_unlock();
3940
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003941 if (!chan)
3942 return NULL;
3943
3944 switch (chan->conn->type) {
3945 case ACL_LINK:
3946 cnt = hdev->acl_cnt;
3947 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003948 case AMP_LINK:
3949 cnt = hdev->block_cnt;
3950 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003951 case SCO_LINK:
3952 case ESCO_LINK:
3953 cnt = hdev->sco_cnt;
3954 break;
3955 case LE_LINK:
3956 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3957 break;
3958 default:
3959 cnt = 0;
3960 BT_ERR("Unknown link type");
3961 }
3962
3963 q = cnt / num;
3964 *quote = q ? q : 1;
3965 BT_DBG("chan %p quote %d", chan, *quote);
3966 return chan;
3967}
3968
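/* Worked example for the channel selection above: if channel A's
 * head skb has priority 5 and channel B's has priority 7, only B
 * competes this round (num and min are reset whenever a higher
 * cur_prio is seen). Among channels at the same priority, the one
 * whose connection has the smallest ->sent count is picked,
 * giving least-recently-served fairness within a priority level.
 */
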
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003969static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3970{
3971 struct hci_conn_hash *h = &hdev->conn_hash;
3972 struct hci_conn *conn;
3973 int num = 0;
3974
3975 BT_DBG("%s", hdev->name);
3976
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003977 rcu_read_lock();
3978
3979 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003980 struct hci_chan *chan;
3981
3982 if (conn->type != type)
3983 continue;
3984
3985 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3986 continue;
3987
3988 num++;
3989
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003990 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003991 struct sk_buff *skb;
3992
3993 if (chan->sent) {
3994 chan->sent = 0;
3995 continue;
3996 }
3997
3998 if (skb_queue_empty(&chan->data_q))
3999 continue;
4000
4001 skb = skb_peek(&chan->data_q);
4002 if (skb->priority >= HCI_PRIO_MAX - 1)
4003 continue;
4004
4005 skb->priority = HCI_PRIO_MAX - 1;
4006
4007 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004008 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004009 }
4010
4011 if (hci_conn_num(hdev, type) == num)
4012 break;
4013 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004014
4015 rcu_read_unlock();
4016
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004017}
4018
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004019static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4020{
4021 /* Calculate count of blocks used by this packet */
4022 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4023}
4024
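/* Worked example: with hdev->block_len == 256 and an skb of 1021
 * bytes (4-byte ACL header plus 1017 bytes of payload),
 * __get_blocks() returns DIV_ROUND_UP(1017, 256) == 4 data blocks
 * to charge against hdev->block_cnt.
 */
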
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004025static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004027 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028 /* ACL tx timeout must be longer than the maximum
4029 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004030 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004031 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004032 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004034}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035
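/* Note on the 40.9 second figure above: the maximum
 * Link_Supervision_Timeout is 0xffff slots * 0.625 ms = 40.96 s,
 * so HCI_ACL_TX_TIMEOUT (45 seconds) is chosen to comfortably
 * exceed it before declaring the link stalled.
 */
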
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004036static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004037{
4038 unsigned int cnt = hdev->acl_cnt;
4039 struct hci_chan *chan;
4040 struct sk_buff *skb;
4041 int quote;
4042
4043 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004044
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004045 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004046 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004047 u32 priority = (skb_peek(&chan->data_q))->priority;
4048 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004049 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004050 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004051
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004052 /* Stop if priority has changed */
4053 if (skb->priority < priority)
4054 break;
4055
4056 skb = skb_dequeue(&chan->data_q);
4057
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004058 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004059 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004060
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004061 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004062 hdev->acl_last_tx = jiffies;
4063
4064 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004065 chan->sent++;
4066 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067 }
4068 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004069
4070 if (cnt != hdev->acl_cnt)
4071 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072}
4073
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004074static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004075{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004076 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004077 struct hci_chan *chan;
4078 struct sk_buff *skb;
4079 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004080 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004081
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004082 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004083
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004084 BT_DBG("%s", hdev->name);
4085
4086 if (hdev->dev_type == HCI_AMP)
4087 type = AMP_LINK;
4088 else
4089 type = ACL_LINK;
4090
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004091 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004092 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004093 u32 priority = (skb_peek(&chan->data_q))->priority;
4094 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4095 int blocks;
4096
4097 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004098 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004099
4100 /* Stop if priority has changed */
4101 if (skb->priority < priority)
4102 break;
4103
4104 skb = skb_dequeue(&chan->data_q);
4105
4106 blocks = __get_blocks(hdev, skb);
4107 if (blocks > hdev->block_cnt)
4108 return;
4109
4110 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004111 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004112
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004113 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004114 hdev->acl_last_tx = jiffies;
4115
4116 hdev->block_cnt -= blocks;
4117 quote -= blocks;
4118
4119 chan->sent += blocks;
4120 chan->conn->sent += blocks;
4121 }
4122 }
4123
4124 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004125 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004126}
4127
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004128static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004129{
4130 BT_DBG("%s", hdev->name);
4131
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004132 /* No ACL link over BR/EDR controller */
4133 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4134 return;
4135
4136 /* No AMP link over AMP controller */
4137 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004138 return;
4139
4140 switch (hdev->flow_ctl_mode) {
4141 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4142 hci_sched_acl_pkt(hdev);
4143 break;
4144
4145 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4146 hci_sched_acl_blk(hdev);
4147 break;
4148 }
4149}
4150
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004152static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153{
4154 struct hci_conn *conn;
4155 struct sk_buff *skb;
4156 int quote;
4157
4158 BT_DBG("%s", hdev->name);
4159
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004160 if (!hci_conn_num(hdev, SCO_LINK))
4161 return;
4162
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4164 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4165 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004166 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167
4168 conn->sent++;
4169 if (conn->sent == ~0)
4170 conn->sent = 0;
4171 }
4172 }
4173}
4174
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004175static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004176{
4177 struct hci_conn *conn;
4178 struct sk_buff *skb;
4179 int quote;
4180
4181 BT_DBG("%s", hdev->name);
4182
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004183 if (!hci_conn_num(hdev, ESCO_LINK))
4184 return;
4185
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004186 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4187 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004188 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4189 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004190 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004191
4192 conn->sent++;
4193 if (conn->sent == ~0)
4194 conn->sent = 0;
4195 }
4196 }
4197}
4198
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004199static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004200{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004201 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004202 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004203 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004204
4205 BT_DBG("%s", hdev->name);
4206
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004207 if (!hci_conn_num(hdev, LE_LINK))
4208 return;
4209
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004210 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004211 /* LE tx timeout must be longer than the maximum
4212 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004213 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004214 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004215 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004216 }
4217
4218 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004219 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004220 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004221 u32 priority = (skb_peek(&chan->data_q))->priority;
4222 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004223 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004224 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004225
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004226 /* Stop if priority has changed */
4227 if (skb->priority < priority)
4228 break;
4229
4230 skb = skb_dequeue(&chan->data_q);
4231
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004232 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004233 hdev->le_last_tx = jiffies;
4234
4235 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004236 chan->sent++;
4237 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004238 }
4239 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004240
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004241 if (hdev->le_pkts)
4242 hdev->le_cnt = cnt;
4243 else
4244 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004245
4246 if (cnt != tmp)
4247 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004248}
4249
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004250static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004252 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253 struct sk_buff *skb;
4254
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004255 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004256 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004258 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann52de5992013-09-03 18:08:38 -07004259 /* Schedule queues and send stuff to HCI driver */
4260 hci_sched_acl(hdev);
4261 hci_sched_sco(hdev);
4262 hci_sched_esco(hdev);
4263 hci_sched_le(hdev);
4264 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004265
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266 /* Send next queued raw (unknown type) packet */
4267 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004268 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004269}
4270
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004271/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272
4273/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004274static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275{
4276 struct hci_acl_hdr *hdr = (void *) skb->data;
4277 struct hci_conn *conn;
4278 __u16 handle, flags;
4279
4280 skb_pull(skb, HCI_ACL_HDR_SIZE);
4281
4282 handle = __le16_to_cpu(hdr->handle);
4283 flags = hci_flags(handle);
4284 handle = hci_handle(handle);
4285
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004286 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004287 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288
4289 hdev->stat.acl_rx++;
4290
4291 hci_dev_lock(hdev);
4292 conn = hci_conn_hash_lookup_handle(hdev, handle);
4293 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004294
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004296 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004297
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004299 l2cap_recv_acldata(conn, skb, flags);
4300 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004302 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004303 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304 }
4305
4306 kfree_skb(skb);
4307}
4308
4309/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004310static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311{
4312 struct hci_sco_hdr *hdr = (void *) skb->data;
4313 struct hci_conn *conn;
4314 __u16 handle;
4315
4316 skb_pull(skb, HCI_SCO_HDR_SIZE);
4317
4318 handle = __le16_to_cpu(hdr->handle);
4319
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004320 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321
4322 hdev->stat.sco_rx++;
4323
4324 hci_dev_lock(hdev);
4325 conn = hci_conn_hash_lookup_handle(hdev, handle);
4326 hci_dev_unlock(hdev);
4327
4328 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004330 sco_recv_scodata(conn, skb);
4331 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004333 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004334 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335 }
4336
4337 kfree_skb(skb);
4338}
4339
Johan Hedberg9238f362013-03-05 20:37:48 +02004340static bool hci_req_is_complete(struct hci_dev *hdev)
4341{
4342 struct sk_buff *skb;
4343
4344 skb = skb_peek(&hdev->cmd_q);
4345 if (!skb)
4346 return true;
4347
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004348 return bt_cb(skb)->hci.req_start;
Johan Hedberg9238f362013-03-05 20:37:48 +02004349}
4350
Johan Hedberg42c6b122013-03-05 20:37:49 +02004351static void hci_resend_last(struct hci_dev *hdev)
4352{
4353 struct hci_command_hdr *sent;
4354 struct sk_buff *skb;
4355 u16 opcode;
4356
4357 if (!hdev->sent_cmd)
4358 return;
4359
4360 sent = (void *) hdev->sent_cmd->data;
4361 opcode = __le16_to_cpu(sent->opcode);
4362 if (opcode == HCI_OP_RESET)
4363 return;
4364
4365 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4366 if (!skb)
4367 return;
4368
4369 skb_queue_head(&hdev->cmd_q, skb);
4370 queue_work(hdev->workqueue, &hdev->cmd_work);
4371}
4372
Johan Hedberge62144872015-04-02 13:41:08 +03004373void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4374 hci_req_complete_t *req_complete,
4375 hci_req_complete_skb_t *req_complete_skb)
Johan Hedberg9238f362013-03-05 20:37:48 +02004376{
Johan Hedberg9238f362013-03-05 20:37:48 +02004377 struct sk_buff *skb;
4378 unsigned long flags;
4379
4380 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4381
Johan Hedberg42c6b122013-03-05 20:37:49 +02004382 /* If the completed command doesn't match the last one that was
4383 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004384 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004385 if (!hci_sent_cmd_data(hdev, opcode)) {
4386 /* Some CSR based controllers generate a spontaneous
4387 * reset complete event during init and any pending
4388 * command will never be completed. In such a case we
4389 * need to resend whatever was the last sent
4390 * command.
4391 */
4392 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4393 hci_resend_last(hdev);
4394
Johan Hedberg9238f362013-03-05 20:37:48 +02004395 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004396 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004397
4398 /* If the command succeeded and there are still more commands in
4399 * this request, the request is not yet complete.
4400 */
4401 if (!status && !hci_req_is_complete(hdev))
4402 return;
4403
4404 /* If this was the last command in a request, the complete
4405 * callback would be found in hdev->sent_cmd instead of the
4406 * command queue (hdev->cmd_q).
4407 */
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004408 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4409 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
Johan Hedberge62144872015-04-02 13:41:08 +03004410 return;
4411 }
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004412
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004413 if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
4414 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
Johan Hedberge62144872015-04-02 13:41:08 +03004415 return;
Johan Hedberg9238f362013-03-05 20:37:48 +02004416 }
4417
4418 /* Remove all pending commands belonging to this request */
4419 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4420 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004421 if (bt_cb(skb)->hci.req_start) {
Johan Hedberg9238f362013-03-05 20:37:48 +02004422 __skb_queue_head(&hdev->cmd_q, skb);
4423 break;
4424 }
4425
Marcel Holtmann242c0eb2015-10-25 22:45:53 +01004426 *req_complete = bt_cb(skb)->hci.req_complete;
4427 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
Johan Hedberg9238f362013-03-05 20:37:48 +02004428 kfree_skb(skb);
4429 }
4430 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
Johan Hedberg9238f362013-03-05 20:37:48 +02004431}
4432
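/* Illustrative sketch (hypothetical caller, kept out of the build):
 * the req_start/req_complete markers consumed above are set by the
 * hci_request API from hci_request.c. A batched request looks
 * roughly like this; the completion callback fires once for the
 * whole batch, driven by hci_req_cmd_complete().
 */
#if 0
static void example_req_complete(struct hci_dev *hdev, u8 status,
				 u16 opcode)
{
	BT_DBG("%s status 0x%2.2x opcode 0x%4.4x", hdev->name, status,
	       opcode);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* Queues the command with req_start set and registers the
	 * completion callback picked up by hci_req_cmd_complete().
	 */
	return hci_req_run(&req, example_req_complete);
}
#endif
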
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004433static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004435 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436 struct sk_buff *skb;
4437
4438 BT_DBG("%s", hdev->name);
4439
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004441 /* Send copy to monitor */
4442 hci_send_to_monitor(hdev, skb);
4443
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444 if (atomic_read(&hdev->promisc)) {
4445 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004446 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447 }
4448
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004449 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450 kfree_skb(skb);
4451 continue;
4452 }
4453
4454 if (test_bit(HCI_INIT, &hdev->flags)) {
4455 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004456 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457 case HCI_ACLDATA_PKT:
4458 case HCI_SCODATA_PKT:
4459 kfree_skb(skb);
4460 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004461 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462 }
4463
4464 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004465 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004467 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468 hci_event_packet(hdev, skb);
4469 break;
4470
4471 case HCI_ACLDATA_PKT:
4472 BT_DBG("%s ACL data packet", hdev->name);
4473 hci_acldata_packet(hdev, skb);
4474 break;
4475
4476 case HCI_SCODATA_PKT:
4477 BT_DBG("%s SCO data packet", hdev->name);
4478 hci_scodata_packet(hdev, skb);
4479 break;
4480
4481 default:
4482 kfree_skb(skb);
4483 break;
4484 }
4485 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486}
4487
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004488static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004490 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491 struct sk_buff *skb;
4492
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004493 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4494 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004497 if (atomic_read(&hdev->cmd_cnt)) {
4498 skb = skb_dequeue(&hdev->cmd_q);
4499 if (!skb)
4500 return;
4501
Wei Yongjun7585b972009-02-25 18:29:52 +08004502 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004503
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004504 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004505 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004507 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004508 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004509 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004510 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004511 schedule_delayed_work(&hdev->cmd_timer,
4512 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513 } else {
4514 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004515 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004516 }
4517 }
4518}