/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

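/* The synchronous request machinery below is a small state machine:
 * a request starts out as HCI_REQ_PEND, and hci_req_sync_complete()
 * (or hci_req_cancel()) moves it to HCI_REQ_DONE (or HCI_REQ_CANCELED)
 * and wakes the waiter sleeping on hdev->req_wait_q. The req_lock
 * mutex serializes whole request/response transactions.
 */
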
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
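
/* Note: the entry created from __hci_init() can be driven from
 * userspace, e.g.:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * (the path assumes the default debugfs mount point and an hci0
 * controller)
 */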

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
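
/* hci_req_cancel() aborts a pending synchronous request by waking the
 * waiter with a canceled status; it is used on the device teardown
 * path when the device is closed while a request is still in flight.
 */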
165
Fengguang Wu77a63e02013-04-20 16:24:31 +0300166static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
167 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300168{
169 struct hci_ev_cmd_complete *ev;
170 struct hci_event_hdr *hdr;
171 struct sk_buff *skb;
172
173 hci_dev_lock(hdev);
174
175 skb = hdev->recv_evt;
176 hdev->recv_evt = NULL;
177
178 hci_dev_unlock(hdev);
179
180 if (!skb)
181 return ERR_PTR(-ENODATA);
182
183 if (skb->len < sizeof(*hdr)) {
184 BT_ERR("Too short HCI event");
185 goto failed;
186 }
187
188 hdr = (void *) skb->data;
189 skb_pull(skb, HCI_EVENT_HDR_SIZE);
190
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300191 if (event) {
192 if (hdr->evt != event)
193 goto failed;
194 return skb;
195 }
196
Johan Hedberg75e84b72013-04-02 13:35:04 +0300197 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
198 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
199 goto failed;
200 }
201
202 if (skb->len < sizeof(*ev)) {
203 BT_ERR("Too short cmd_complete event");
204 goto failed;
205 }
206
207 ev = (void *) skb->data;
208 skb_pull(skb, sizeof(*ev));
209
210 if (opcode == __le16_to_cpu(ev->opcode))
211 return skb;
212
213 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
214 __le16_to_cpu(ev->opcode));
215
216failed:
217 kfree_skb(skb);
218 return ERR_PTR(-ENODATA);
219}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
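
/* Illustrative sketch of how a caller (e.g. a driver setup routine)
 * uses __hci_cmd_sync(); the returned skb carries the Command Complete
 * parameters and must be freed by the caller:
 *
 *	struct hci_rp_read_local_version *rp;
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_local_version *)skb->data;
 *	...
 *	kfree_skb(skb);
 */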

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
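
/* Note the split above: hci_req_sync() takes the req_lock for a single
 * request, while __hci_req_sync() leaves locking to the caller so that
 * multi-stage sequences (such as the init requests below) can run
 * under one lock hold.
 */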

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
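
/* BR/EDR controllers use packet-based flow control, while AMP
 * controllers use block-based flow control, which is why bredr_init()
 * and amp_init() program different flow control modes above.
 */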

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
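
/* hci_init1_req() is only stage one: __hci_init() below chains up to
 * four request stages, with the later ones conditional on what the
 * earlier responses (supported features and commands) reported.
 */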

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
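
/* Each events[i] above is byte i of the 64-bit Set Event Mask command
 * parameter, so e.g. events[4] |= 0x02 sets bit 33 of the mask (byte 4,
 * bit 1), which corresponds to the Inquiry Result with RSSI event.
 */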

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should be
		 * available as well. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled. To
		 * achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}
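
	/* hdev->commands is the Read Local Supported Commands bit mask:
	 * commands[n] & (1 << b) tests bit b of octet n. The check below,
	 * octet 5 bit 4, is used as the indicator for Write Default Link
	 * Policy Settings support.
	 */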
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* The HCI_BREDR device type covers single-mode LE, single-mode
	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
	 * only need the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
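
/* The reference taken by hci_dev_get() must be dropped with
 * hci_dev_put() when the caller is done with the device, as
 * hci_inquiry() below does on its done: path.
 */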

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
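
/* The discovery cache keeps every entry on the "all" list; entries
 * whose remote name is still unknown are additionally linked on
 * "unknown", and entries queued for name resolution on "resolve".
 * Flushing frees the entries and resets the auxiliary lists.
 */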

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
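
/* The resolve list is kept sorted by ascending abs(rssi), so entries
 * with the strongest signal (smallest magnitude RSSI) sit near the
 * head and get their names resolved first.
 */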

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001337 /* for unlimited number of responses we will use buffer with
1338 * 255 entries
1339 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1341
1342 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1343 * copy it to the user space.
1344 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001345 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001346 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 err = -ENOMEM;
1348 goto done;
1349 }
1350
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001351 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001353 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354
1355 BT_DBG("num_rsp %d", ir.num_rsp);
1356
1357 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1358 ptr += sizeof(ir);
1359 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001360 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001362 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 err = -EFAULT;
1364
1365 kfree(buf);
1366
1367done:
1368 hci_dev_put(hdev);
1369 return err;
1370}
1371
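/* Bring the controller up: run the driver open() and setup() callbacks,
 * perform the HCI init sequence and announce the new power state. On
 * any failure the work queues are flushed and the transport is closed
 * again.
 */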
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

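/* Bring the controller down: cancel pending work, flush queues and the
 * inquiry cache, drop all connections and hand the transport back to
 * the driver via close().
 */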
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

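/* Drop all queued packets and pending work, then issue an HCI Reset
 * while holding the request lock.
 */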
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

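/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a
 * scan mode that was set through the legacy HCISETSCAN ioctl and notify
 * the management interface about the new settings.
 */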
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

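/* Handler for the legacy HCISET* ioctls that modify authentication,
 * encryption, scan mode, link policy, link mode, packet type and MTU
 * settings of a BR/EDR controller.
 */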
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

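/* Handler for the HCIGETDEVLIST ioctl: report the id and flags of every
 * registered controller, hiding HCI_UP for devices that are only up
 * because of the automatic power-on during setup.
 */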
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

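/* Deferred power-on work: open the device and either announce it to the
 * management interface or power it back off if an error condition that
 * was ignored during setup (rfkill, missing address) still applies.
 */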
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

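/* Deferred hardware error work: let the driver inspect the error code,
 * then restart the controller with a close/open cycle.
 */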
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

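/* Decide whether a new link key should be stored persistently or
 * dropped once the current connection goes down, based on the key type
 * and the authentication requirements of both sides.
 */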
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

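/* Look up a long term key by identity address and the role it was
 * distributed for; keys generated with Secure Connections match either
 * role.
 */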
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

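/* Resolve a Resolvable Private Address to a stored IRK: first check the
 * cached RPA of each key, then fall back to the cryptographic match and
 * cache the RPA on success.
 */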
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

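/* Store or update a BR/EDR link key and, if requested, report through
 * the persistent parameter whether it should outlive the current
 * connection.
 */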
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

Johan Hedberg35f74982014-02-18 17:14:32 +02002555void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002556{
2557 struct oob_data *data, *n;
2558
2559 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2560 list_del(&data->list);
2561 kfree(data);
2562 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002563}
2564
Marcel Holtmann07988722014-01-10 02:07:29 -08002565int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002566 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d52014-10-26 20:33:47 +01002567 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01002568{
2569 struct oob_data *data;
2570
Johan Hedberg6928a922014-10-26 20:46:09 +01002571 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002572 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002573 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002574 if (!data)
2575 return -ENOMEM;
2576
2577 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01002578 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01002579 list_add(&data->list, &hdev->remote_oob_data);
2580 }
2581
Johan Hedberg81328d52014-10-26 20:33:47 +01002582 if (hash192 && rand192) {
2583 memcpy(data->hash192, hash192, sizeof(data->hash192));
2584 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002585 if (hash256 && rand256)
2586 data->present = 0x03;
Johan Hedberg81328d52014-10-26 20:33:47 +01002587 } else {
2588 memset(data->hash192, 0, sizeof(data->hash192));
2589 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002590 if (hash256 && rand256)
2591 data->present = 0x02;
2592 else
2593 data->present = 0x00;
Marcel Holtmann07988722014-01-10 02:07:29 -08002594 }
2595
Johan Hedberg81328d52014-10-26 20:33:47 +01002596 if (hash256 && rand256) {
2597 memcpy(data->hash256, hash256, sizeof(data->hash256));
2598 memcpy(data->rand256, rand256, sizeof(data->rand256));
2599 } else {
2600 memset(data->hash256, 0, sizeof(data->hash256));
2601 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmannf7697b12015-01-30 23:20:55 -08002602 if (hash192 && rand192)
2603 data->present = 0x01;
Johan Hedberg81328d52014-10-26 20:33:47 +01002604 }
Marcel Holtmann07988722014-01-10 02:07:29 -08002605
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002606 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002607
2608 return 0;
2609}
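
/* Illustrative note (not part of the original file): the "present"
 * field written above is a bitmask of which OOB pairs are valid, with
 * bit 0 covering the P-192 hash192/rand192 pair and bit 1 the P-256
 * hash256/rand256 pair:
 *
 *   0x00  no valid pair,  0x01  P-192 only,
 *   0x02  P-256 only,     0x03  both pairs.
 *
 * A hypothetical consumer could test it like this:
 */
static bool example_oob_has_p256(struct hci_dev *hdev, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);

	return data && (data->present & 0x02);
}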
2610
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002611struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002612 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002613{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002614 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002615
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002616 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002617 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002618 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002619 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002620
2621 return NULL;
2622}
2623
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002624void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002625{
2626 struct list_head *p, *n;
2627
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002628 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002629 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002630
2631 list_del(p);
2632 kfree(b);
2633 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002634}
2635
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002636int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002637{
2638 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002639
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002640 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002641 return -EBADF;
2642
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002643 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002644 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002645
Johan Hedberg27f70f32014-07-21 10:50:06 +03002646 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002647 if (!entry)
2648 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002649
2650 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002651 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002652
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002653 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002654
2655 return 0;
2656}
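
/* Illustrative usage sketch (hypothetical helper, not in this file):
 * the list helpers above back both the whitelist and the blacklist.
 * A caller that does not care about duplicates can fold -EEXIST into
 * success; like the other list users it must hold hdev->lock.
 */
static int example_whitelist_add(struct hci_dev *hdev, bdaddr_t *bdaddr,
				 u8 type)
{
	int err;

	err = hci_bdaddr_list_add(&hdev->whitelist, bdaddr, type);

	return err == -EEXIST ? 0 : err;
}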
2657
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002658int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002659{
2660 struct bdaddr_list *entry;
2661
Johan Hedberg35f74982014-02-18 17:14:32 +02002662 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002663 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002664 return 0;
2665 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002666
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002667 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002668 if (!entry)
2669 return -ENOENT;
2670
2671 list_del(&entry->list);
2672 kfree(entry);
2673
2674 return 0;
2675}
2676
Andre Guedes15819a72014-02-03 13:56:18 -03002677/* This function requires the caller holds hdev->lock */
2678struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2679 bdaddr_t *addr, u8 addr_type)
2680{
2681 struct hci_conn_params *params;
2682
Johan Hedberg738f6182014-07-03 19:33:51 +03002683 /* The conn params list only contains identity addresses */
2684 if (!hci_is_identity_address(addr, addr_type))
2685 return NULL;
2686
Andre Guedes15819a72014-02-03 13:56:18 -03002687 list_for_each_entry(params, &hdev->le_conn_params, list) {
2688 if (bacmp(&params->addr, addr) == 0 &&
2689 params->addr_type == addr_type) {
2690 return params;
2691 }
2692 }
2693
2694 return NULL;
2695}
2696
2697/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002698struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2699 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002700{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002701 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002702
Johan Hedberg738f6182014-07-03 19:33:51 +03002703 /* The list only contains identity addresses */
2704 if (!hci_is_identity_address(addr, addr_type))
2705 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002706
Johan Hedberg501f8822014-07-04 12:37:26 +03002707 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002708 if (bacmp(&param->addr, addr) == 0 &&
2709 param->addr_type == addr_type)
2710 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002711 }
2712
2713 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002714}
2715
2716/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002717struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2718 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002719{
2720 struct hci_conn_params *params;
2721
Johan Hedbergc46245b2014-07-02 17:37:33 +03002722 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002723 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03002724
Andre Guedes15819a72014-02-03 13:56:18 -03002725 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002726 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002727 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002728
2729 params = kzalloc(sizeof(*params), GFP_KERNEL);
2730 if (!params) {
2731 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002732 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002733 }
2734
2735 bacpy(&params->addr, addr);
2736 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002737
2738 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002739 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002740
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002741 params->conn_min_interval = hdev->le_conn_min_interval;
2742 params->conn_max_interval = hdev->le_conn_max_interval;
2743 params->conn_latency = hdev->le_conn_latency;
2744 params->supervision_timeout = hdev->le_supv_timeout;
2745 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2746
2747 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2748
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002749 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002750}
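
/* Illustrative usage sketch (hypothetical helper, not in this file):
 * looking up or creating connection parameters for a peer and turning
 * on autoconnect. The real mgmt code additionally moves the entry
 * between the pend_le_conns/pend_le_reports action lists; that part
 * is omitted here.
 */
static int example_enable_auto_connect(struct hci_dev *hdev,
				       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	hci_dev_lock(hdev);

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (params)
		params->auto_connect = HCI_AUTO_CONN_ALWAYS;

	hci_dev_unlock(hdev);

	return params ? 0 : -ENOMEM;
}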
2751
Johan Hedbergf6c63242014-08-15 21:06:59 +03002752static void hci_conn_params_free(struct hci_conn_params *params)
2753{
2754 if (params->conn) {
2755 hci_conn_drop(params->conn);
2756 hci_conn_put(params->conn);
2757 }
2758
2759 list_del(&params->action);
2760 list_del(&params->list);
2761 kfree(params);
2762}
2763
Andre Guedes15819a72014-02-03 13:56:18 -03002764/* This function requires the caller holds hdev->lock */
2765void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2766{
2767 struct hci_conn_params *params;
2768
2769 params = hci_conn_params_lookup(hdev, addr, addr_type);
2770 if (!params)
2771 return;
2772
Johan Hedbergf6c63242014-08-15 21:06:59 +03002773 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002774
Johan Hedberg95305ba2014-07-04 12:37:21 +03002775 hci_update_background_scan(hdev);
2776
Andre Guedes15819a72014-02-03 13:56:18 -03002777 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2778}
2779
2780/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03002781void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002782{
2783 struct hci_conn_params *params, *tmp;
2784
2785 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03002786 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2787 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03002788 list_del(&params->list);
2789 kfree(params);
2790 }
2791
Johan Hedberg55af49a2014-07-02 17:37:26 +03002792 BT_DBG("All LE disabled connection parameters were removed");
2793}
2794
2795/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03002796void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002797{
2798 struct hci_conn_params *params, *tmp;
2799
Johan Hedbergf6c63242014-08-15 21:06:59 +03002800 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2801 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002802
Johan Hedberga2f41a82014-07-04 12:37:19 +03002803 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02002804
Andre Guedes15819a72014-02-03 13:56:18 -03002805 BT_DBG("All LE connection parameters were removed");
2806}
2807
Marcel Holtmann1904a852015-01-11 13:50:44 -08002808static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002809{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002810 if (status) {
2811 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002812
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002813 hci_dev_lock(hdev);
2814 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2815 hci_dev_unlock(hdev);
2816 return;
2817 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002818}
2819
Marcel Holtmann1904a852015-01-11 13:50:44 -08002820static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2821 u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002822{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002823 /* General inquiry access code (GIAC) */
2824 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2825 struct hci_request req;
2826 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002827 int err;
2828
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002829 if (status) {
2830 BT_ERR("Failed to disable LE scanning: status %d", status);
2831 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002832 }
2833
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08002834 hdev->discovery.scan_start = 0;
2835
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002836 switch (hdev->discovery.type) {
2837 case DISCOV_TYPE_LE:
2838 hci_dev_lock(hdev);
2839 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2840 hci_dev_unlock(hdev);
2841 break;
2842
2843 case DISCOV_TYPE_INTERLEAVED:
2844 hci_req_init(&req, hdev);
2845
2846 memset(&cp, 0, sizeof(cp));
2847 memcpy(&cp.lap, lap, sizeof(cp.lap));
2848 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2849 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2850
2851 hci_dev_lock(hdev);
2852
2853 hci_inquiry_cache_flush(hdev);
2854
2855 err = hci_req_run(&req, inquiry_complete);
2856 if (err) {
2857 BT_ERR("Inquiry request failed: err %d", err);
2858 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2859 }
2860
2861 hci_dev_unlock(hdev);
2862 break;
2863 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002864}
2865
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002866static void le_scan_disable_work(struct work_struct *work)
2867{
2868 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002869 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002870 struct hci_request req;
2871 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002872
2873 BT_DBG("%s", hdev->name);
2874
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08002875 cancel_delayed_work_sync(&hdev->le_scan_restart);
2876
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002877 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002878
Andre Guedesb1efcc22014-02-26 20:21:40 -03002879 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002880
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002881 err = hci_req_run(&req, le_scan_disable_work_complete);
2882 if (err)
2883 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002884}
2885
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08002886static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2887 u16 opcode)
2888{
2889 unsigned long timeout, duration, scan_start, now;
2890
2891 BT_DBG("%s", hdev->name);
2892
2893 if (status) {
2894 BT_ERR("Failed to restart LE scan: status %d", status);
2895 return;
2896 }
2897
2898 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2899 !hdev->discovery.scan_start)
2900 return;
2901
2902	/* When the scan was started, hdev->le_scan_disable was queued to
2903	 * run 'duration' after scan_start. During the scan restart that
2904	 * work has been cancelled, so queue it again with the proper
2905	 * timeout to make sure the scan does not run indefinitely.
2906	 */
2907 duration = hdev->discovery.scan_duration;
2908 scan_start = hdev->discovery.scan_start;
2909 now = jiffies;
2910 if (now - scan_start <= duration) {
2911 int elapsed;
2912
2913 if (now >= scan_start)
2914 elapsed = now - scan_start;
2915 else
2916 elapsed = ULONG_MAX - scan_start + now;
2917
2918 timeout = duration - elapsed;
2919 } else {
2920 timeout = 0;
2921 }
2922 queue_delayed_work(hdev->workqueue,
2923 &hdev->le_scan_disable, timeout);
2924}
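
/* Worked example (illustrative, not part of the original file): the
 * open-coded wraparound handling above in standalone form. With
 * scan_start = ULONG_MAX - 100 and now = 50 after jiffies wrapped,
 * elapsed becomes 150 ticks instead of a huge bogus difference, so
 * only the remaining duration - 150 ticks are re-queued.
 */
static unsigned long example_remaining_ticks(unsigned long scan_start,
					     unsigned long duration,
					     unsigned long now)
{
	unsigned long elapsed;

	if (now >= scan_start)
		elapsed = now - scan_start;
	else
		elapsed = ULONG_MAX - scan_start + now;

	return elapsed < duration ? duration - elapsed : 0;
}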
2925
2926static void le_scan_restart_work(struct work_struct *work)
2927{
2928 struct hci_dev *hdev = container_of(work, struct hci_dev,
2929 le_scan_restart.work);
2930 struct hci_request req;
2931 struct hci_cp_le_set_scan_enable cp;
2932 int err;
2933
2934 BT_DBG("%s", hdev->name);
2935
2936 /* If controller is not scanning we are done. */
2937 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2938 return;
2939
2940 hci_req_init(&req, hdev);
2941
2942 hci_req_add_le_scan_disable(&req);
2943
2944 memset(&cp, 0, sizeof(cp));
2945 cp.enable = LE_SCAN_ENABLE;
2946 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2947 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2948
2949 err = hci_req_run(&req, le_scan_restart_work_complete);
2950 if (err)
2951 BT_ERR("Restart LE scan request failed: err %d", err);
2952}
2953
Johan Hedberga1f4c312014-02-27 14:05:41 +02002954/* Copy the Identity Address of the controller.
2955 *
2956 * If the controller has a public BD_ADDR, then by default use that one.
2957 * If this is an LE-only controller without a public address, default to
2958 * the static random address.
2959 *
2960 * For debugging purposes it is possible to force controllers with a
2961 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002962 *
2963 * In case BR/EDR has been disabled on a dual-mode controller and
2964 * userspace has configured a static address, then that address
2965 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02002966 */
2967void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2968 u8 *bdaddr_type)
2969{
Marcel Holtmann111902f2014-06-21 04:53:17 +02002970 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002971 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2972 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2973 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02002974 bacpy(bdaddr, &hdev->static_addr);
2975 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2976 } else {
2977 bacpy(bdaddr, &hdev->bdaddr);
2978 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2979 }
2980}
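
/* Illustrative usage sketch (hypothetical helper, not in this file):
 * logging the identity address. %pMR is the printk extension for a
 * bdaddr_t in XX:XX:XX:XX:XX:XX form, as used elsewhere in this file.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);

	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr,
	       bdaddr_type);
}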
2981
David Herrmann9be0dab2012-04-22 14:39:57 +02002982/* Alloc HCI device */
2983struct hci_dev *hci_alloc_dev(void)
2984{
2985 struct hci_dev *hdev;
2986
Johan Hedberg27f70f32014-07-21 10:50:06 +03002987 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02002988 if (!hdev)
2989 return NULL;
2990
David Herrmannb1b813d2012-04-22 14:39:58 +02002991 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2992 hdev->esco_type = (ESCO_HV1);
2993 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002994 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2995 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02002996 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002997 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2998 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002999
David Herrmannb1b813d2012-04-22 14:39:58 +02003000 hdev->sniff_max_interval = 800;
3001 hdev->sniff_min_interval = 80;
3002
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003003 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02003004 hdev->le_adv_min_interval = 0x0800;
3005 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003006 hdev->le_scan_interval = 0x0060;
3007 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003008 hdev->le_conn_min_interval = 0x0028;
3009 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003010 hdev->le_conn_latency = 0x0000;
3011 hdev->le_supv_timeout = 0x002a;
Marcel Holtmanna8e1bfa2014-12-20 16:28:40 +01003012 hdev->le_def_tx_len = 0x001b;
3013 hdev->le_def_tx_time = 0x0148;
3014 hdev->le_max_tx_len = 0x001b;
3015 hdev->le_max_tx_time = 0x0148;
3016 hdev->le_max_rx_len = 0x001b;
3017 hdev->le_max_rx_time = 0x0148;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003018
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003019 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003020 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003021 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3022 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003023
David Herrmannb1b813d2012-04-22 14:39:58 +02003024 mutex_init(&hdev->lock);
3025 mutex_init(&hdev->req_lock);
3026
3027 INIT_LIST_HEAD(&hdev->mgmt_pending);
3028 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003029 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003030 INIT_LIST_HEAD(&hdev->uuids);
3031 INIT_LIST_HEAD(&hdev->link_keys);
3032 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003033 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003034 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003035 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003036 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003037 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003038 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003039 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003040
3041 INIT_WORK(&hdev->rx_work, hci_rx_work);
3042 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3043 INIT_WORK(&hdev->tx_work, hci_tx_work);
3044 INIT_WORK(&hdev->power_on, hci_power_on);
Marcel Holtmannc7741d12015-01-28 11:09:55 -08003045 INIT_WORK(&hdev->error_reset, hci_error_reset);
David Herrmannb1b813d2012-04-22 14:39:58 +02003046
David Herrmannb1b813d2012-04-22 14:39:58 +02003047 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3048 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3049 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003050 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
David Herrmannb1b813d2012-04-22 14:39:58 +02003051
David Herrmannb1b813d2012-04-22 14:39:58 +02003052 skb_queue_head_init(&hdev->rx_q);
3053 skb_queue_head_init(&hdev->cmd_q);
3054 skb_queue_head_init(&hdev->raw_q);
3055
3056 init_waitqueue_head(&hdev->req_wait_q);
3057
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003058 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003059
David Herrmannb1b813d2012-04-22 14:39:58 +02003060 hci_init_sysfs(hdev);
3061 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003062
3063 return hdev;
3064}
3065EXPORT_SYMBOL(hci_alloc_dev);
3066
3067/* Free HCI device */
3068void hci_free_dev(struct hci_dev *hdev)
3069{
David Herrmann9be0dab2012-04-22 14:39:57 +02003070 /* will free via device release */
3071 put_device(&hdev->dev);
3072}
3073EXPORT_SYMBOL(hci_free_dev);
3074
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075/* Register HCI device */
3076int hci_register_dev(struct hci_dev *hdev)
3077{
David Herrmannb1b813d2012-04-22 14:39:58 +02003078 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079
Marcel Holtmann74292d52014-07-06 15:50:27 +02003080 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081 return -EINVAL;
3082
Mat Martineau08add512011-11-02 16:18:36 -07003083 /* Do not allow HCI_AMP devices to register at index 0,
3084 * so the index can be used as the AMP controller ID.
3085 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003086 switch (hdev->dev_type) {
3087 case HCI_BREDR:
3088 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3089 break;
3090 case HCI_AMP:
3091 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3092 break;
3093 default:
3094 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003096
Sasha Levin3df92b32012-05-27 22:36:56 +02003097 if (id < 0)
3098 return id;
3099
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100 sprintf(hdev->name, "hci%d", id);
3101 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003102
3103 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3104
Kees Cookd8537542013-07-03 15:04:57 -07003105 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3106 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003107 if (!hdev->workqueue) {
3108 error = -ENOMEM;
3109 goto err;
3110 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003111
Kees Cookd8537542013-07-03 15:04:57 -07003112 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3113 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003114 if (!hdev->req_workqueue) {
3115 destroy_workqueue(hdev->workqueue);
3116 error = -ENOMEM;
3117 goto err;
3118 }
3119
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003120 if (!IS_ERR_OR_NULL(bt_debugfs))
3121 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3122
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003123 dev_set_name(&hdev->dev, "%s", hdev->name);
3124
3125 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003126 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003127 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003129 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003130 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3131 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003132 if (hdev->rfkill) {
3133 if (rfkill_register(hdev->rfkill) < 0) {
3134 rfkill_destroy(hdev->rfkill);
3135 hdev->rfkill = NULL;
3136 }
3137 }
3138
Johan Hedberg5e130362013-09-13 08:58:17 +03003139 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3140 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3141
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003142 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003143 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003144
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003145 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003146 /* Assume BR/EDR support until proven otherwise (such as
3147 * through reading supported features during init).
3148 */
3149 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3150 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003151
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003152 write_lock(&hci_dev_list_lock);
3153 list_add(&hdev->list, &hci_dev_list);
3154 write_unlock(&hci_dev_list_lock);
3155
Marcel Holtmann4a964402014-07-02 19:10:33 +02003156 /* Devices that are marked for raw-only usage are unconfigured
3157 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003158 */
3159 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02003160 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003161
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003163 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164
Johan Hedberg19202572013-01-14 22:33:51 +02003165 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003166
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003168
David Herrmann33ca9542011-10-08 14:58:49 +02003169err_wqueue:
3170 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003171 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003172err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003173 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003174
David Herrmann33ca9542011-10-08 14:58:49 +02003175 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176}
3177EXPORT_SYMBOL(hci_register_dev);
3178
3179/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003180void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181{
Sasha Levin3df92b32012-05-27 22:36:56 +02003182 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003183
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003184 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185
Johan Hovold94324962012-03-15 14:48:41 +01003186 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3187
Sasha Levin3df92b32012-05-27 22:36:56 +02003188 id = hdev->id;
3189
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003190 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003192 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193
3194 hci_dev_do_close(hdev);
3195
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303196 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003197 kfree_skb(hdev->reassembly[i]);
3198
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003199 cancel_work_sync(&hdev->power_on);
3200
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003201 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02003202 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3203 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003204 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003205 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003206 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003207 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003208
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003209 /* mgmt_index_removed should take care of emptying the
3210 * pending list. */
3211 BUG_ON(!list_empty(&hdev->mgmt_pending));
3212
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213 hci_notify(hdev, HCI_DEV_UNREG);
3214
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003215 if (hdev->rfkill) {
3216 rfkill_unregister(hdev->rfkill);
3217 rfkill_destroy(hdev->rfkill);
3218 }
3219
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003220 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003221
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003222 debugfs_remove_recursive(hdev->debugfs);
3223
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003224 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003225 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003226
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003227 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003228 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003229 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003230 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003231 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003232 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003233 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003234 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003235 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003236 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003237 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003238 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003239
David Herrmanndc946bd2012-01-07 15:47:24 +01003240 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003241
3242 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243}
3244EXPORT_SYMBOL(hci_unregister_dev);
3245
3246/* Suspend HCI device */
3247int hci_suspend_dev(struct hci_dev *hdev)
3248{
3249 hci_notify(hdev, HCI_DEV_SUSPEND);
3250 return 0;
3251}
3252EXPORT_SYMBOL(hci_suspend_dev);
3253
3254/* Resume HCI device */
3255int hci_resume_dev(struct hci_dev *hdev)
3256{
3257 hci_notify(hdev, HCI_DEV_RESUME);
3258 return 0;
3259}
3260EXPORT_SYMBOL(hci_resume_dev);
3261
Marcel Holtmann75e05692014-11-02 08:15:38 +01003262/* Reset HCI device */
3263int hci_reset_dev(struct hci_dev *hdev)
3264{
3265 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3266 struct sk_buff *skb;
3267
3268 skb = bt_skb_alloc(3, GFP_ATOMIC);
3269 if (!skb)
3270 return -ENOMEM;
3271
3272 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3273 memcpy(skb_put(skb, 3), hw_err, 3);
3274
3275 /* Send Hardware Error to upper stack */
3276 return hci_recv_frame(hdev, skb);
3277}
3278EXPORT_SYMBOL(hci_reset_dev);
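
/* Illustrative note (assumption from the HCI specification, not part
 * of the original file): the three bytes injected by hci_reset_dev()
 * form a complete HCI event packet:
 *
 *   hw_err[0] = 0x10  event code (HCI_EV_HARDWARE_ERROR)
 *   hw_err[1] = 0x01  parameter total length
 *   hw_err[2] = 0x00  hardware code
 *
 * Feeding it through hci_recv_frame() means the synthetic error takes
 * the same RX path as a controller-generated one.
 */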
3279
Marcel Holtmann76bca882009-11-18 00:40:39 +01003280/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003281int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003282{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003283 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003284 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003285 kfree_skb(skb);
3286 return -ENXIO;
3287 }
3288
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003289 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003290 bt_cb(skb)->incoming = 1;
3291
3292 /* Time stamp */
3293 __net_timestamp(skb);
3294
Marcel Holtmann76bca882009-11-18 00:40:39 +01003295 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003296 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003297
Marcel Holtmann76bca882009-11-18 00:40:39 +01003298 return 0;
3299}
3300EXPORT_SYMBOL(hci_recv_frame);
3301
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303302static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003303 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303304{
3305 int len = 0;
3306 int hlen = 0;
3307 int remain = count;
3308 struct sk_buff *skb;
3309 struct bt_skb_cb *scb;
3310
3311 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003312 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303313 return -EILSEQ;
3314
3315 skb = hdev->reassembly[index];
3316
3317 if (!skb) {
3318 switch (type) {
3319 case HCI_ACLDATA_PKT:
3320 len = HCI_MAX_FRAME_SIZE;
3321 hlen = HCI_ACL_HDR_SIZE;
3322 break;
3323 case HCI_EVENT_PKT:
3324 len = HCI_MAX_EVENT_SIZE;
3325 hlen = HCI_EVENT_HDR_SIZE;
3326 break;
3327 case HCI_SCODATA_PKT:
3328 len = HCI_MAX_SCO_SIZE;
3329 hlen = HCI_SCO_HDR_SIZE;
3330 break;
3331 }
3332
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003333 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303334 if (!skb)
3335 return -ENOMEM;
3336
3337 scb = (void *) skb->cb;
3338 scb->expect = hlen;
3339 scb->pkt_type = type;
3340
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303341 hdev->reassembly[index] = skb;
3342 }
3343
3344 while (count) {
3345 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003346 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303347
3348 memcpy(skb_put(skb, len), data, len);
3349
3350 count -= len;
3351 data += len;
3352 scb->expect -= len;
3353 remain = count;
3354
3355 switch (type) {
3356 case HCI_EVENT_PKT:
3357 if (skb->len == HCI_EVENT_HDR_SIZE) {
3358 struct hci_event_hdr *h = hci_event_hdr(skb);
3359 scb->expect = h->plen;
3360
3361 if (skb_tailroom(skb) < scb->expect) {
3362 kfree_skb(skb);
3363 hdev->reassembly[index] = NULL;
3364 return -ENOMEM;
3365 }
3366 }
3367 break;
3368
3369 case HCI_ACLDATA_PKT:
3370 if (skb->len == HCI_ACL_HDR_SIZE) {
3371 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3372 scb->expect = __le16_to_cpu(h->dlen);
3373
3374 if (skb_tailroom(skb) < scb->expect) {
3375 kfree_skb(skb);
3376 hdev->reassembly[index] = NULL;
3377 return -ENOMEM;
3378 }
3379 }
3380 break;
3381
3382 case HCI_SCODATA_PKT:
3383 if (skb->len == HCI_SCO_HDR_SIZE) {
3384 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3385 scb->expect = h->dlen;
3386
3387 if (skb_tailroom(skb) < scb->expect) {
3388 kfree_skb(skb);
3389 hdev->reassembly[index] = NULL;
3390 return -ENOMEM;
3391 }
3392 }
3393 break;
3394 }
3395
3396 if (scb->expect == 0) {
3397 /* Complete frame */
3398
3399 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003400 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303401
3402 hdev->reassembly[index] = NULL;
3403 return remain;
3404 }
3405 }
3406
3407 return remain;
3408}
3409
Suraj Sumangala99811512010-07-14 13:02:19 +05303410#define STREAM_REASSEMBLY 0
3411
3412int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3413{
3414 int type;
3415 int rem = 0;
3416
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003417 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303418 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3419
3420 if (!skb) {
3421 struct { char type; } *pkt;
3422
3423 /* Start of the frame */
3424 pkt = data;
3425 type = pkt->type;
3426
3427 data++;
3428 count--;
3429 } else
3430 type = bt_cb(skb)->pkt_type;
3431
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003432 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003433 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303434 if (rem < 0)
3435 return rem;
3436
3437 data += (count - rem);
3438 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003439 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303440
3441 return rem;
3442}
3443EXPORT_SYMBOL(hci_recv_stream_fragment);
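
/* Illustrative usage sketch (hypothetical driver hook, not in this
 * file): a UART-style transport feeds raw bytes into the stream
 * reassembler, which carves them back into complete HCI packets and
 * forwards each one via hci_recv_frame().
 */
static void example_uart_receive(struct hci_dev *hdev, const void *buf,
				 int count)
{
	int rem = hci_recv_stream_fragment(hdev, (void *)buf, count);

	if (rem < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, rem);
}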
3444
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445/* ---- Interface to upper protocols ---- */
3446
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447int hci_register_cb(struct hci_cb *cb)
3448{
3449 BT_DBG("%p name %s", cb, cb->name);
3450
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003451 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003453 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454
3455 return 0;
3456}
3457EXPORT_SYMBOL(hci_register_cb);
3458
3459int hci_unregister_cb(struct hci_cb *cb)
3460{
3461 BT_DBG("%p name %s", cb, cb->name);
3462
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003463 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003465 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466
3467 return 0;
3468}
3469EXPORT_SYMBOL(hci_unregister_cb);
3470
Marcel Holtmann51086992013-10-10 14:54:19 -07003471static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003473 int err;
3474
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003475 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003477 /* Time stamp */
3478 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003480 /* Send copy to monitor */
3481 hci_send_to_monitor(hdev, skb);
3482
3483 if (atomic_read(&hdev->promisc)) {
3484 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003485 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486 }
3487
3488 /* Get rid of skb owner, prior to sending to the driver. */
3489 skb_orphan(skb);
3490
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003491 err = hdev->send(hdev, skb);
3492 if (err < 0) {
3493 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3494 kfree_skb(skb);
3495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496}
3497
Marcel Holtmann899de762014-07-11 05:51:58 +02003498bool hci_req_pending(struct hci_dev *hdev)
3499{
3500 return (hdev->req_status == HCI_REQ_PEND);
3501}
3502
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003503/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003504int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3505 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003506{
3507 struct sk_buff *skb;
3508
3509 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3510
3511 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3512 if (!skb) {
3513 BT_ERR("%s no memory for command", hdev->name);
3514 return -ENOMEM;
3515 }
3516
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003517 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003518 * single-command requests.
3519 */
Eyal Birger49a6fe02015-03-01 14:58:25 +02003520 bt_cb(skb)->req_start = true;
Johan Hedberg11714b32013-03-05 20:37:47 +02003521
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003523 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524
3525 return 0;
3526}
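
/* Illustrative usage sketch (not part of the original file): queueing
 * a parameterless command. HCI_OP_INQUIRY_CANCEL carries no payload,
 * so plen is 0 and param is NULL.
 */
static int example_cancel_inquiry(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}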
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527
3528/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003529void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530{
3531 struct hci_command_hdr *hdr;
3532
3533 if (!hdev->sent_cmd)
3534 return NULL;
3535
3536 hdr = (void *) hdev->sent_cmd->data;
3537
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003538 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539 return NULL;
3540
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003541 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542
3543 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3544}
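
/* Illustrative usage sketch (not part of the original file): an event
 * handler recovering the parameters of the command it completes, as
 * hci_event.c does for HCI_OP_WRITE_SCAN_ENABLE, whose single byte of
 * payload is the requested scan mode.
 */
static void example_peek_sent_param(struct hci_dev *hdev)
{
	u8 *param = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (param)
		BT_DBG("%s last scan enable 0x%2.2x", hdev->name, *param);
}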
3545
3546/* Send ACL data */
3547static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3548{
3549 struct hci_acl_hdr *hdr;
3550 int len = skb->len;
3551
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003552 skb_push(skb, HCI_ACL_HDR_SIZE);
3553 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003554 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003555 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3556 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557}
3558
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003559static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003560 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003562 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 struct hci_dev *hdev = conn->hdev;
3564 struct sk_buff *list;
3565
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003566 skb->len = skb_headlen(skb);
3567 skb->data_len = 0;
3568
3569 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003570
3571 switch (hdev->dev_type) {
3572 case HCI_BREDR:
3573 hci_add_acl_hdr(skb, conn->handle, flags);
3574 break;
3575 case HCI_AMP:
3576 hci_add_acl_hdr(skb, chan->handle, flags);
3577 break;
3578 default:
3579 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3580 return;
3581 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003582
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003583 list = skb_shinfo(skb)->frag_list;
3584 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585 /* Non-fragmented */
3586 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3587
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003588 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589 } else {
3590 /* Fragmented */
3591 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3592
3593 skb_shinfo(skb)->frag_list = NULL;
3594
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003595 /* Queue all fragments atomically. We need spin_lock_bh
3596 * here because of 6LoWPAN links: there this function is
3597 * called from softirq context, where taking a plain
3598 * spin lock could cause deadlocks.
3599 */
3600 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003602 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003603
3604 flags &= ~ACL_START;
3605 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606 do {
3607 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003608
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003609 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003610 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611
3612 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3613
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003614 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615 } while (list);
3616
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003617 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003619}
3620
3621void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3622{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003623 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003624
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003625 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003626
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003627 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003629 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003630}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631
3632/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003633void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634{
3635 struct hci_dev *hdev = conn->hdev;
3636 struct hci_sco_hdr hdr;
3637
3638 BT_DBG("%s len %d", hdev->name, skb->len);
3639
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003640 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 hdr.dlen = skb->len;
3642
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003643 skb_push(skb, HCI_SCO_HDR_SIZE);
3644 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003645 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003647 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003648
Linus Torvalds1da177e2005-04-16 15:20:36 -07003649 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003650 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652
3653/* ---- HCI TX task (outgoing data) ---- */
3654
3655/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003656static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3657 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658{
3659 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003660 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003661 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003663 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003665
3666 rcu_read_lock();
3667
3668 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003669 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003671
3672 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3673 continue;
3674
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675 num++;
3676
3677 if (c->sent < min) {
3678 min = c->sent;
3679 conn = c;
3680 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003681
3682 if (hci_conn_num(hdev, type) == num)
3683 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684 }
3685
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003686 rcu_read_unlock();
3687
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003689 int cnt, q;
3690
3691 switch (conn->type) {
3692 case ACL_LINK:
3693 cnt = hdev->acl_cnt;
3694 break;
3695 case SCO_LINK:
3696 case ESCO_LINK:
3697 cnt = hdev->sco_cnt;
3698 break;
3699 case LE_LINK:
3700 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3701 break;
3702 default:
3703 cnt = 0;
3704 BT_ERR("Unknown link type");
3705 }
3706
3707 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708 *quote = q ? q : 1;
3709 } else
3710 *quote = 0;
3711
3712 BT_DBG("conn %p quote %d", conn, *quote);
3713 return conn;
3714}
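
/* Worked example (illustrative, not part of the original file): with
 * cnt = 8 free ACL buffers and num = 3 ACL connections holding queued
 * data, the least-used connection gets a quote of 8 / 3 = 2 packets
 * for this round; the "q ? q : 1" fallback guarantees progress even
 * when cnt < num would make the integer division yield 0.
 */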
3715
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003716static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717{
3718 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003719 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720
Ville Tervobae1f5d92011-02-10 22:38:53 -03003721 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003723 rcu_read_lock();
3724
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003726 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003727 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003728 BT_ERR("%s killing stalled connection %pMR",
3729 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003730 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731 }
3732 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003733
3734 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735}
3736
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003737static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3738 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003739{
3740 struct hci_conn_hash *h = &hdev->conn_hash;
3741 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003742 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003743 struct hci_conn *conn;
3744 int cnt, q, conn_num = 0;
3745
3746 BT_DBG("%s", hdev->name);
3747
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003748 rcu_read_lock();
3749
3750 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003751 struct hci_chan *tmp;
3752
3753 if (conn->type != type)
3754 continue;
3755
3756 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3757 continue;
3758
3759 conn_num++;
3760
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003761 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003762 struct sk_buff *skb;
3763
3764 if (skb_queue_empty(&tmp->data_q))
3765 continue;
3766
3767 skb = skb_peek(&tmp->data_q);
3768 if (skb->priority < cur_prio)
3769 continue;
3770
3771 if (skb->priority > cur_prio) {
3772 num = 0;
3773 min = ~0;
3774 cur_prio = skb->priority;
3775 }
3776
3777 num++;
3778
3779 if (conn->sent < min) {
3780 min = conn->sent;
3781 chan = tmp;
3782 }
3783 }
3784
3785 if (hci_conn_num(hdev, type) == conn_num)
3786 break;
3787 }
3788
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003789 rcu_read_unlock();
3790
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003791 if (!chan)
3792 return NULL;
3793
3794 switch (chan->conn->type) {
3795 case ACL_LINK:
3796 cnt = hdev->acl_cnt;
3797 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003798 case AMP_LINK:
3799 cnt = hdev->block_cnt;
3800 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003801 case SCO_LINK:
3802 case ESCO_LINK:
3803 cnt = hdev->sco_cnt;
3804 break;
3805 case LE_LINK:
3806 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3807 break;
3808 default:
3809 cnt = 0;
3810 BT_ERR("Unknown link type");
3811 }
3812
3813 q = cnt / num;
3814 *quote = q ? q : 1;
3815 BT_DBG("chan %p quote %d", chan, *quote);
3816 return chan;
3817}
3818
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003819static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3820{
3821 struct hci_conn_hash *h = &hdev->conn_hash;
3822 struct hci_conn *conn;
3823 int num = 0;
3824
3825 BT_DBG("%s", hdev->name);
3826
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003827 rcu_read_lock();
3828
3829 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003830 struct hci_chan *chan;
3831
3832 if (conn->type != type)
3833 continue;
3834
3835 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3836 continue;
3837
3838 num++;
3839
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003840 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003841 struct sk_buff *skb;
3842
3843 if (chan->sent) {
3844 chan->sent = 0;
3845 continue;
3846 }
3847
3848 if (skb_queue_empty(&chan->data_q))
3849 continue;
3850
3851 skb = skb_peek(&chan->data_q);
3852 if (skb->priority >= HCI_PRIO_MAX - 1)
3853 continue;
3854
3855 skb->priority = HCI_PRIO_MAX - 1;
3856
3857 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003858 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003859 }
3860
3861 if (hci_conn_num(hdev, type) == num)
3862 break;
3863 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003864
3865 rcu_read_unlock();
3866
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003867}
3868
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003869static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3870{
3871 /* Calculate count of blocks used by this packet */
3872 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3873}
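
/* Worked example (illustrative, not part of the original file): with
 * hdev->block_len = 64 and a 200-byte ACL frame (196 payload bytes
 * after the 4-byte ACL header), DIV_ROUND_UP(196, 64) = 4 blocks are
 * charged against hdev->block_cnt.
 */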
3874
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003875static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876{
Marcel Holtmann4a964402014-07-02 19:10:33 +02003877 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878 /* ACL tx timeout must be longer than the maximum
3879 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003880 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003881 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003882 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003884}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
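
/* Note on hci_sched_acl_pkt(): the quote is consumed from the head of a
 * channel's queue only while the skb priority stays constant; a drop in
 * priority ends the burst early, and any transmission at all triggers
 * hci_prio_recalculate() so starved channels catch up next round.
 */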

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
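
/* Note on hci_sched_acl_blk(): block-based flow control accounts for
 * controller buffer blocks rather than whole packets, so the global
 * hdev->block_cnt and the per-channel quote are both decremented by
 * __get_blocks(hdev, skb) instead of by one.
 */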

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
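
/* hci_sched_sco() above and hci_sched_esco() below are simpler than the
 * ACL path: (e)SCO traffic is scheduled per connection via
 * hci_low_sent(), with no hci_chan priority queues, and conn->sent
 * merely wraps back to 0 once it saturates at ~0.
 */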

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
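
/* Note on hci_sched_le(): controllers that report no dedicated LE
 * buffer pool (hdev->le_pkts == 0) share the ACL credits, which is why
 * the working count is loaded from, and stored back to, either le_cnt
 * or acl_cnt depending on le_pkts.
 */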

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
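
/* hci_tx_work() above is the single TX entry point: the per-link-type
 * schedulers run only while no HCI user channel owns the device, while
 * packets on the raw queue (queued, for instance, by raw HCI sockets)
 * bypass scheduling entirely.
 */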
4120
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004121/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122
4123/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004124static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125{
4126 struct hci_acl_hdr *hdr = (void *) skb->data;
4127 struct hci_conn *conn;
4128 __u16 handle, flags;
4129
4130 skb_pull(skb, HCI_ACL_HDR_SIZE);
4131
4132 handle = __le16_to_cpu(hdr->handle);
4133 flags = hci_flags(handle);
4134 handle = hci_handle(handle);
4135
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004136 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004137 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138
4139 hdev->stat.acl_rx++;
4140
4141 hci_dev_lock(hdev);
4142 conn = hci_conn_hash_lookup_handle(hdev, handle);
4143 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004144
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004146 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004147
Linus Torvalds1da177e2005-04-16 15:20:36 -07004148 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004149 l2cap_recv_acldata(conn, skb, flags);
4150 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004152 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004153 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154 }
4155
4156 kfree_skb(skb);
4157}
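
/* The 12-bit connection handle and the packet boundary/broadcast flags
 * share the 16-bit field decoded above; hci_handle() masks out the low
 * twelve bits and hci_flags() extracts the top four. A field of 0x2001,
 * for example, yields handle 0x001 with flags 0x2.
 */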

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req_start;
}
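
/* Request framing: bt_cb(skb)->req_start marks the first command of each
 * request, so the current request is complete once the command queue is
 * empty or the skb at its head starts a new request.
 */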

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4222
Johan Hedberg9238f362013-03-05 20:37:48 +02004223void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4224{
4225 hci_req_complete_t req_complete = NULL;
4226 struct sk_buff *skb;
4227 unsigned long flags;
4228
4229 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4230
Johan Hedberg42c6b122013-03-05 20:37:49 +02004231 /* If the completed command doesn't match the last one that was
4232 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004233 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004234 if (!hci_sent_cmd_data(hdev, opcode)) {
4235 /* Some CSR based controllers generate a spontaneous
4236 * reset complete event during init and any pending
4237 * command will never be completed. In such a case we
4238 * need to resend whatever was the last sent
4239 * command.
4240 */
4241 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4242 hci_resend_last(hdev);
4243
Johan Hedberg9238f362013-03-05 20:37:48 +02004244 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004245 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004246
4247 /* If the command succeeded and there's still more commands in
4248 * this request the request is not yet complete.
4249 */
4250 if (!status && !hci_req_is_complete(hdev))
4251 return;
4252
4253 /* If this was the last command in a request the complete
4254 * callback would be found in hdev->sent_cmd instead of the
4255 * command queue (hdev->cmd_q).
4256 */
4257 if (hdev->sent_cmd) {
Eyal Birger49a6fe02015-03-01 14:58:25 +02004258 req_complete = bt_cb(hdev->sent_cmd)->req_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004259
4260 if (req_complete) {
4261 /* We must set the complete callback to NULL to
4262 * avoid calling the callback more than once if
4263 * this function gets called again.
4264 */
Eyal Birger49a6fe02015-03-01 14:58:25 +02004265 bt_cb(hdev->sent_cmd)->req_complete = NULL;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004266
Johan Hedberg9238f362013-03-05 20:37:48 +02004267 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004268 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004269 }
4270
4271 /* Remove all pending commands belonging to this request */
4272 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4273 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
Eyal Birger49a6fe02015-03-01 14:58:25 +02004274 if (bt_cb(skb)->req_start) {
Johan Hedberg9238f362013-03-05 20:37:48 +02004275 __skb_queue_head(&hdev->cmd_q, skb);
4276 break;
4277 }
4278
Eyal Birger49a6fe02015-03-01 14:58:25 +02004279 req_complete = bt_cb(skb)->req_complete;
Johan Hedberg9238f362013-03-05 20:37:48 +02004280 kfree_skb(skb);
4281 }
4282 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4283
4284call_complete:
4285 if (req_complete)
Marcel Holtmann1904a852015-01-11 13:50:44 -08004286 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
Johan Hedberg9238f362013-03-05 20:37:48 +02004287}
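
/* Failure handling above: a non-zero status aborts the whole request, so
 * any queued commands belonging to it are flushed up to the next
 * req_start marker, and the complete callback is invoked exactly once,
 * with the failing opcode (or HCI_OP_NOP on success).
 */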

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
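
/* Command flow control: hdev->cmd_cnt tracks the credits the controller
 * grants back through Command Complete/Status events; a clone of every
 * command is parked in hdev->sent_cmd so replies can be matched (and the
 * command resent after a spontaneous reset), and cmd_timer serves as the
 * watchdog, disarmed only while an HCI_RESET is in flight.
 */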