/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)   mutex_lock(&d->req_lock)
#define hci_req_unlock(d) mutex_unlock(&d->req_lock)

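/* A synchronous request marks req_status HCI_REQ_PEND before sleeping
 * on req_wait_q; hci_req_sync_complete() or hci_req_cancel() moves it
 * to HCI_REQ_DONE or HCI_REQ_CANCELED and wakes the waiter.
 */
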
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

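/* Writing a boolean string ("1"/"0", "Y"/"N") to the dut_mode entry
 * toggles Device Under Test mode: enabling it issues
 * HCI_OP_ENABLE_DUT_MODE, disabling it resets the controller.
 */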
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open   = simple_open,
        .read   = dut_mode_read,
        .write  = dut_mode_write,
        .llseek = default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

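/* Retrieve the last received event from hdev->recv_evt and verify that
 * it carries the expected event code or, for Command Complete, the
 * expected command opcode, before handing it back to the caller.
 */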
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

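/* Send a single HCI command and block until the matching completion
 * event arrives. The caller is queued on req_wait_q in
 * TASK_INTERRUPTIBLE state before the request is run, so a completion
 * that fires immediately cannot be missed.
 */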
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

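/* Typical use of __hci_cmd_sync(), as in dut_mode_write() above
 * (illustrative sketch):
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      err = -bt_to_errno(skb->data[0]);
 *      kfree_skb(skb);
 */
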
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

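/* Each bit in the 8-byte event mask gates delivery of one HCI event;
 * the defaults below are extended based on the features the controller
 * actually reports, via the lmp_*_capable() checks.
 */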
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * the max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

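/* Third init stage: set the main and LE event masks, read stored link
 * keys, configure link policy and page scan behavior, and perform the
 * remaining LE-specific setup.
 */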
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10; /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20; /* LE Remote Connection
                                            * Parameter Request
                                            */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40; /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04; /* LE Direct Advertising
                                            * Report
                                            */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80; /* LE Read Local P-256
                                            * Public Key Complete
                                            */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01; /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

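/* Controller bring-up runs in up to four stages: init1 resets the
 * device and reads basic information, init2 does transport-specific
 * setup, and init3/init4 (skipped for AMP controllers) configure event
 * masks and optional features.
 */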
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

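/* The inquiry cache keeps every discovered device on the "all" list;
 * devices whose names are still unknown are also linked on "unknown",
 * and devices queued for remote name resolution on "resolve".
 */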
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

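/* Merge one inquiry result into the cache. The returned flags tell the
 * caller whether the name still needs confirmation
 * (MGMT_DEV_FOUND_CONFIRM_NAME) and whether the device only supports
 * legacy pairing (MGMT_DEV_FOUND_LEGACY_PAIRING).
 */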
Marcel Holtmannaf589252014-07-01 14:11:20 +02001179u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1180 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181{
Johan Hedberg30883512012-01-04 14:16:21 +02001182 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001183 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02001184 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001186 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187
Johan Hedberg6928a922014-10-26 20:46:09 +01001188 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
Szymon Janc2b2fec42012-11-20 11:38:54 +01001189
Marcel Holtmannaf589252014-07-01 14:11:20 +02001190 if (!data->ssp_mode)
1191 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001192
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001193 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001194 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02001195 if (!ie->data.ssp_mode)
1196 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001197
Johan Hedberga3d4e202012-01-09 00:53:02 +02001198 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001199 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001200 ie->data.rssi = data->rssi;
1201 hci_inquiry_cache_update_resolve(hdev, ie);
1202 }
1203
Johan Hedberg561aafb2012-01-04 13:31:59 +02001204 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001205 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001206
Johan Hedberg561aafb2012-01-04 13:31:59 +02001207 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03001208 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02001209 if (!ie) {
1210 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1211 goto done;
1212 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001213
1214 list_add(&ie->all, &cache->all);
1215
1216 if (name_known) {
1217 ie->name_state = NAME_KNOWN;
1218 } else {
1219 ie->name_state = NAME_NOT_KNOWN;
1220 list_add(&ie->list, &cache->unknown);
1221 }
1222
1223update:
1224 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001225 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001226 ie->name_state = NAME_KNOWN;
1227 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228 }
1229
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001230 memcpy(&ie->data, data, sizeof(*data));
1231 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001233
1234 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02001235 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02001236
Marcel Holtmannaf589252014-07-01 14:11:20 +02001237done:
1238 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239}
1240
1241static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1242{
Johan Hedberg30883512012-01-04 14:16:21 +02001243 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244 struct inquiry_info *info = (struct inquiry_info *) buf;
1245 struct inquiry_entry *e;
1246 int copied = 0;
1247
Johan Hedberg561aafb2012-01-04 13:31:59 +02001248 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001250
1251 if (copied >= num)
1252 break;
1253
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 bacpy(&info->bdaddr, &data->bdaddr);
1255 info->pscan_rep_mode = data->pscan_rep_mode;
1256 info->pscan_period_mode = data->pscan_period_mode;
1257 info->pscan_mode = data->pscan_mode;
1258 memcpy(info->dev_class, data->dev_class, 3);
1259 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001260
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001262 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 }
1264
1265 BT_DBG("cache %p, copied %d", cache, copied);
1266 return copied;
1267}
1268
Johan Hedberg42c6b122013-03-05 20:37:49 +02001269static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270{
1271 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001272 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273 struct hci_cp_inquiry cp;
1274
1275 BT_DBG("%s", hdev->name);
1276
1277 if (test_bit(HCI_INQUIRY, &hdev->flags))
1278 return;
1279
1280 /* Start Inquiry */
1281 memcpy(&cp.lap, &ir->lap, 3);
1282 cp.length = ir->length;
1283 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001284 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285}
1286
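/* Handler for the HCIINQUIRY ioctl: copy the request from userspace,
 * run a fresh inquiry if the cache is stale or IREQ_CACHE_FLUSH was
 * set, wait for it to finish and copy the cached results back.
 *
 * Illustrative userspace sketch (not part of this file; buffer sized
 * for the request header plus 255 responses):
 *
 *	struct hci_inquiry_req *ir = buf;
 *	ir->dev_id  = 0;
 *	ir->length  = 8;
 *	ir->num_rsp = 255;
 *	memcpy(ir->lap, "\x33\x8b\x9e", 3);	(general inquiry, GIAC)
 *	ioctl(hci_sock, HCIINQUIRY, ir);
 */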
1287int hci_inquiry(void __user *arg)
1288{
1289 __u8 __user *ptr = arg;
1290 struct hci_inquiry_req ir;
1291 struct hci_dev *hdev;
1292 int err = 0, do_inquiry = 0, max_rsp;
1293 long timeo;
1294 __u8 *buf;
1295
1296 if (copy_from_user(&ir, ptr, sizeof(ir)))
1297 return -EFAULT;
1298
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001299 hdev = hci_dev_get(ir.dev_id);
1300 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 return -ENODEV;
1302
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001303 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001304 err = -EBUSY;
1305 goto done;
1306 }
1307
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001308 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001309 err = -EOPNOTSUPP;
1310 goto done;
1311 }
1312
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001313 if (hdev->dev_type != HCI_BREDR) {
1314 err = -EOPNOTSUPP;
1315 goto done;
1316 }
1317
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001318 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001319 err = -EOPNOTSUPP;
1320 goto done;
1321 }
1322
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001323 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001324 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001325 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001326 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 do_inquiry = 1;
1328 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001329 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330
Marcel Holtmann04837f62006-07-03 10:02:33 +02001331 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001332
1333 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001334 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1335 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001336 if (err < 0)
1337 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001338
1339 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1340 * cleared). If it is interrupted by a signal, return -EINTR.
1341 */
NeilBrown74316202014-07-07 15:16:04 +10001342 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001343 TASK_INTERRUPTIBLE)) {
1344 /* Go through done so the hci_dev_get() reference is dropped */
err = -EINTR;
goto done;
}
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001345 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001347 /* For an unlimited number of responses, use a buffer with
1348 * 255 entries.
1349 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1351
1352 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
1353 * and then copy it to user space.
1354 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001355 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001356 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 err = -ENOMEM;
1358 goto done;
1359 }
1360
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001361 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001363 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364
1365 BT_DBG("num_rsp %d", ir.num_rsp);
1366
1367 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1368 ptr += sizeof(ir);
1369 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001370 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001372 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 err = -EFAULT;
}
1374
1375 kfree(buf);
1376
1377done:
1378 hci_dev_put(hdev);
1379 return err;
1380}
1381
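/* Bring the device up: open the transport, run the vendor setup and
 * the init sequence and, on success, set HCI_UP and notify the stack.
 * On any failure the work queues are flushed and the transport is
 * closed again. Callers include hci_dev_open(), hci_power_on() and
 * hci_error_reset().
 */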
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001382static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 int ret = 0;
1385
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386 BT_DBG("%s %p", hdev->name, hdev);
1387
1388 hci_req_lock(hdev);
1389
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001390 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001391 ret = -ENODEV;
1392 goto done;
1393 }
1394
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001395 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1396 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001397 /* Check for rfkill but allow the HCI setup stage to
1398 * proceed (which in itself doesn't cause any RF activity).
1399 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001400 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001401 ret = -ERFKILL;
1402 goto done;
1403 }
1404
1405 /* Check for valid public address or a configured static
1406 * random address, but let the HCI setup proceed to
1407 * be able to determine if there is a public address
1408 * or not.
1409 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001410 * In case of user channel usage, it is not important
1411 * if a public address or static random address is
1412 * available.
1413 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001414 * This check is only valid for BR/EDR controllers
1415 * since AMP controllers do not have an address.
1416 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001417 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001418 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001419 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1420 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1421 ret = -EADDRNOTAVAIL;
1422 goto done;
1423 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001424 }
1425
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 if (test_bit(HCI_UP, &hdev->flags)) {
1427 ret = -EALREADY;
1428 goto done;
1429 }
1430
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 if (hdev->open(hdev)) {
1432 ret = -EIO;
1433 goto done;
1434 }
1435
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001436 atomic_set(&hdev->cmd_cnt, 1);
1437 set_bit(HCI_INIT, &hdev->flags);
1438
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001439 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001440 if (hdev->setup)
1441 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001442
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001443 /* The transport driver can set these quirks before
1444 * creating the HCI device or in its setup callback.
1445 *
1446 * In case any of them is set, the controller has to
1447 * start up as unconfigured.
1448 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001449 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1450 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001451 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001452
1453 /* For an unconfigured controller it is required to
1454 * read at least the version information provided by
1455 * the Read Local Version Information command.
1456 *
1457 * If the set_bdaddr driver callback is provided, then
1458 * also the original Bluetooth public device address
1459 * will be read using the Read BD Address command.
1460 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001461 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001462 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001463 }
1464
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001465 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001466 /* If public address change is configured, ensure that
1467 * the address gets programmed. If the driver does not
1468 * support changing the public address, fail the power
1469 * on procedure.
1470 */
1471 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1472 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001473 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1474 else
1475 ret = -EADDRNOTAVAIL;
1476 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001477
1478 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001479 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1480 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001481 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 }
1483
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001484 clear_bit(HCI_INIT, &hdev->flags);
1485
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 if (!ret) {
1487 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001488 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 set_bit(HCI_UP, &hdev->flags);
1490 hci_notify(hdev, HCI_DEV_UP);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001491 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1492 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1493 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1494 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001495 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001496 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001497 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001498 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001499 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001500 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001502 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001503 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001504 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505
1506 skb_queue_purge(&hdev->cmd_q);
1507 skb_queue_purge(&hdev->rx_q);
1508
1509 if (hdev->flush)
1510 hdev->flush(hdev);
1511
1512 if (hdev->sent_cmd) {
1513 kfree_skb(hdev->sent_cmd);
1514 hdev->sent_cmd = NULL;
1515 }
1516
1517 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001518 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 }
1520
1521done:
1522 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 return ret;
1524}
1525
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001526/* ---- HCI ioctl helpers ---- */
1527
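/* Entry point for the HCIDEVUP ioctl: resolves the device id, rejects
 * unconfigured devices unless they are in user channel mode, cancels
 * a pending auto-power-off and hands over to hci_dev_do_open().
 */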
1528int hci_dev_open(__u16 dev)
1529{
1530 struct hci_dev *hdev;
1531 int err;
1532
1533 hdev = hci_dev_get(dev);
1534 if (!hdev)
1535 return -ENODEV;
1536
Marcel Holtmann4a964402014-07-02 19:10:33 +02001537 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001538 * up as user channel. Trying to bring them up as normal devices
1539 * will result in a failure. Only user channel operation is
1540 * possible.
1541 *
1542 * When this function is called for a user channel, the flag
1543 * HCI_USER_CHANNEL will be set first before attempting to
1544 * open the device.
1545 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001546 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1547 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001548 err = -EOPNOTSUPP;
1549 goto done;
1550 }
1551
Johan Hedberge1d08f42013-10-01 22:44:50 +03001552 /* We need to ensure that no other power on/off work is pending
1553 * before proceeding to call hci_dev_do_open. This is
1554 * particularly important if the setup procedure has not yet
1555 * completed.
1556 */
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001557 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Johan Hedberge1d08f42013-10-01 22:44:50 +03001558 cancel_delayed_work(&hdev->power_off);
1559
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001560 /* After this call it is guaranteed that the setup procedure
1561 * has finished. This means that error conditions like RFKILL
1562 * or no valid public or static random address apply.
1563 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001564 flush_workqueue(hdev->req_workqueue);
1565
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001566 /* For controllers that do not use the management interface and
Johan Hedbergb6ae8452014-07-30 09:22:22 +03001567 * are brought up using the legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001568 * so that pairing works for them. Once the management interface
1569 * is in use, this bit is cleared again and userspace has
1570 * to enable it explicitly.
1571 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001572 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1573 !hci_dev_test_flag(hdev, HCI_MGMT))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001574 hci_dev_set_flag(hdev, HCI_BONDABLE);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001575
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001576 err = hci_dev_do_open(hdev);
1577
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001578done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001579 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001580 return err;
1581}
1582
Johan Hedbergd7347f32014-07-04 12:37:23 +03001583/* This function requires the caller holds hdev->lock */
1584static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1585{
1586 struct hci_conn_params *p;
1587
Johan Hedbergf161dd42014-08-15 21:06:54 +03001588 list_for_each_entry(p, &hdev->le_conn_params, list) {
1589 if (p->conn) {
1590 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001591 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001592 p->conn = NULL;
1593 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001594 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001595 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001596
1597 BT_DBG("All LE pending actions cleared");
1598}
1599
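/* Tear the device down, roughly in the reverse order of
 * hci_dev_do_open(): cancel pending work, flush the inquiry cache,
 * pending LE actions and connections, optionally send HCI_Reset, drop
 * the queues and finally close the transport and clear the volatile
 * flags.
 */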
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600static int hci_dev_do_close(struct hci_dev *hdev)
1601{
1602 BT_DBG("%s %p", hdev->name, hdev);
1603
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001604 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Tedd Ho-Jeong Ana44fecb2015-02-13 09:20:50 -08001605 /* Execute vendor specific shutdown routine */
1606 if (hdev->shutdown)
1607 hdev->shutdown(hdev);
1608 }
1609
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001610 cancel_delayed_work(&hdev->power_off);
1611
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 hci_req_cancel(hdev, ENODEV);
1613 hci_req_lock(hdev);
1614
1615 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001616 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 hci_req_unlock(hdev);
1618 return 0;
1619 }
1620
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001621 /* Flush RX and TX works */
1622 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001623 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001625 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001626 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001627 hdev->discov_timeout = 0;
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001628 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1629 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001630 }
1631
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001632 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
Johan Hedberg7d785252011-12-15 00:47:39 +02001633 cancel_delayed_work(&hdev->service_cache);
1634
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001635 cancel_delayed_work_sync(&hdev->le_scan_disable);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08001636 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedberg4518bb02014-02-24 20:35:07 +02001637
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001638 if (hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg4518bb02014-02-24 20:35:07 +02001639 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001640
Johan Hedberg76727c02014-11-18 09:00:14 +02001641 /* Avoid potential lockdep warnings from the *_flush() calls by
1642 * ensuring the workqueue is empty up front.
1643 */
1644 drain_workqueue(hdev->workqueue);
1645
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001646 hci_dev_lock(hdev);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001647
Johan Hedberg8f502f82015-01-28 19:56:02 +02001648 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1649
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001650 if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001651 if (hdev->dev_type == HCI_BREDR)
1652 mgmt_powered(hdev, 0);
1653 }
1654
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001655 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03001656 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001657 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001658 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659
Marcel Holtmann64dae962015-01-28 14:10:28 -08001660 smp_unregister(hdev);
1661
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 hci_notify(hdev, HCI_DEV_DOWN);
1663
1664 if (hdev->flush)
1665 hdev->flush(hdev);
1666
1667 /* Reset device */
1668 skb_queue_purge(&hdev->cmd_q);
1669 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001670 if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1671 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001672 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001674 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 clear_bit(HCI_INIT, &hdev->flags);
1676 }
1677
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001678 /* flush cmd work */
1679 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
1681 /* Drop queues */
1682 skb_queue_purge(&hdev->rx_q);
1683 skb_queue_purge(&hdev->cmd_q);
1684 skb_queue_purge(&hdev->raw_q);
1685
1686 /* Drop last sent command */
1687 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001688 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 kfree_skb(hdev->sent_cmd);
1690 hdev->sent_cmd = NULL;
1691 }
1692
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001693 kfree_skb(hdev->recv_evt);
1694 hdev->recv_evt = NULL;
1695
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 /* After this point our queues are empty and no tasks are scheduled */
1698 hdev->close(hdev);
1699
Johan Hedberg35b973c2013-03-15 17:06:59 -05001700 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001701 hdev->flags &= BIT(HCI_RAW);
Marcel Holtmanneacb44d2015-03-13 09:04:17 -07001702 hci_dev_clear_volatile_flags(hdev);
Johan Hedberg35b973c2013-03-15 17:06:59 -05001703
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001704 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001705 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001706
Johan Hedberge59fda82012-02-22 18:11:53 +02001707 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001708 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001709 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02001710
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 hci_req_unlock(hdev);
1712
1713 hci_dev_put(hdev);
1714 return 0;
1715}
1716
1717int hci_dev_close(__u16 dev)
1718{
1719 struct hci_dev *hdev;
1720 int err;
1721
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001722 hdev = hci_dev_get(dev);
1723 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001725
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001726 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001727 err = -EBUSY;
1728 goto done;
1729 }
1730
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001731 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001732 cancel_delayed_work(&hdev->power_off);
1733
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001735
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001736done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 hci_dev_put(hdev);
1738 return err;
1739}
1740
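/* Soft reset while the device stays up: purge the queues, flush the
 * inquiry cache and the connection hash, reset the flow control
 * counters and issue HCI_Reset synchronously.
 */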
Marcel Holtmann5c912492015-01-28 11:53:05 -08001741static int hci_dev_do_reset(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742{
Marcel Holtmann5c912492015-01-28 11:53:05 -08001743 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744
Marcel Holtmann5c912492015-01-28 11:53:05 -08001745 BT_DBG("%s %p", hdev->name, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746
1747 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 /* Drop queues */
1750 skb_queue_purge(&hdev->rx_q);
1751 skb_queue_purge(&hdev->cmd_q);
1752
Johan Hedberg76727c02014-11-18 09:00:14 +02001753 /* Avoid potential lockdep warnings from the *_flush() calls by
1754 * ensuring the workqueue is empty up front.
1755 */
1756 drain_workqueue(hdev->workqueue);
1757
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001758 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001759 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001761 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762
1763 if (hdev->flush)
1764 hdev->flush(hdev);
1765
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001766 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001767 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001769 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 return ret;
1773}
1774
Marcel Holtmann5c912492015-01-28 11:53:05 -08001775int hci_dev_reset(__u16 dev)
1776{
1777 struct hci_dev *hdev;
1778 int err;
1779
1780 hdev = hci_dev_get(dev);
1781 if (!hdev)
1782 return -ENODEV;
1783
1784 if (!test_bit(HCI_UP, &hdev->flags)) {
1785 err = -ENETDOWN;
1786 goto done;
1787 }
1788
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001789 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001790 err = -EBUSY;
1791 goto done;
1792 }
1793
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001794 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001795 err = -EOPNOTSUPP;
1796 goto done;
1797 }
1798
1799 err = hci_dev_do_reset(hdev);
1800
1801done:
1802 hci_dev_put(hdev);
1803 return err;
1804}
1805
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806int hci_dev_reset_stat(__u16 dev)
1807{
1808 struct hci_dev *hdev;
1809 int ret = 0;
1810
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001811 hdev = hci_dev_get(dev);
1812 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 return -ENODEV;
1814
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001815 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001816 ret = -EBUSY;
1817 goto done;
1818 }
1819
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001820 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001821 ret = -EOPNOTSUPP;
1822 goto done;
1823 }
1824
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1826
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001827done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 return ret;
1830}
1831
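/* Keep the mgmt flags in sync after a scan mode change that came in
 * through the legacy HCISETSCAN ioctl rather than through mgmt:
 * SCAN_PAGE maps to HCI_CONNECTABLE and SCAN_INQUIRY to
 * HCI_DISCOVERABLE. If either setting changed and mgmt is in use, the
 * new settings are reported to userspace.
 */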
Johan Hedberg123abc02014-07-10 12:09:07 +03001832static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1833{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001834 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001835
1836 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1837
1838 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001839 conn_changed = !hci_dev_test_and_set_flag(hdev,
1840 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001841 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001842 conn_changed = hci_dev_test_and_clear_flag(hdev,
1843 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001844
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001845 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001846 discov_changed = !hci_dev_test_and_set_flag(hdev,
1847 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001848 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001849 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001850 discov_changed = hci_dev_test_and_clear_flag(hdev,
1851 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001852 }
1853
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001854 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001855 return;
1856
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001857 if (conn_changed || discov_changed) {
1858 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001859 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001860
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001861 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001862 mgmt_update_adv_data(hdev);
1863
Johan Hedberg123abc02014-07-10 12:09:07 +03001864 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001865 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001866}
1867
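/* Dispatcher for the legacy HCISET* ioctls (authentication,
 * encryption, scan mode, link policy, packet types and MTUs). All of
 * them are rejected for user channel, unconfigured or non-BR/EDR
 * devices.
 */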
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868int hci_dev_cmd(unsigned int cmd, void __user *arg)
1869{
1870 struct hci_dev *hdev;
1871 struct hci_dev_req dr;
1872 int err = 0;
1873
1874 if (copy_from_user(&dr, arg, sizeof(dr)))
1875 return -EFAULT;
1876
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001877 hdev = hci_dev_get(dr.dev_id);
1878 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 return -ENODEV;
1880
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001881 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001882 err = -EBUSY;
1883 goto done;
1884 }
1885
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001886 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001887 err = -EOPNOTSUPP;
1888 goto done;
1889 }
1890
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001891 if (hdev->dev_type != HCI_BREDR) {
1892 err = -EOPNOTSUPP;
1893 goto done;
1894 }
1895
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001896 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001897 err = -EOPNOTSUPP;
1898 goto done;
1899 }
1900
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 switch (cmd) {
1902 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001903 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1904 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 break;
1906
1907 case HCISETENCRYPT:
1908 if (!lmp_encrypt_capable(hdev)) {
1909 err = -EOPNOTSUPP;
1910 break;
1911 }
1912
1913 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1914 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001915 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1916 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 if (err)
1918 break;
1919 }
1920
Johan Hedberg01178cd2013-03-05 20:37:41 +02001921 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1922 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 break;
1924
1925 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001926 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1927 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03001928
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001929 /* Ensure that the connectable and discoverable states
1930 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03001931 */
Johan Hedberg123abc02014-07-10 12:09:07 +03001932 if (!err)
1933 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 break;
1935
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001936 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001937 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1938 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001939 break;
1940
1941 case HCISETLINKMODE:
1942 hdev->link_mode = ((__u16) dr.dev_opt) &
1943 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1944 break;
1945
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 case HCISETPTYPE:
1947 hdev->pkt_type = (__u16) dr.dev_opt;
1948 break;
1949
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001951 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1952 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 break;
1954
1955 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001956 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1957 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 break;
1959
1960 default:
1961 err = -EINVAL;
1962 break;
1963 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001964
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001965done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 hci_dev_put(hdev);
1967 return err;
1968}
1969
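/* HCIGETDEVLIST ioctl helper: return an array of (dev_id, flags)
 * pairs for up to dev_num registered controllers. Devices pending
 * auto-power-off are reported as down so userspace does not act on a
 * transient HCI_UP.
 */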
1970int hci_get_dev_list(void __user *arg)
1971{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001972 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 struct hci_dev_list_req *dl;
1974 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 int n = 0, size, err;
1976 __u16 dev_num;
1977
1978 if (get_user(dev_num, (__u16 __user *) arg))
1979 return -EFAULT;
1980
1981 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1982 return -EINVAL;
1983
1984 size = sizeof(*dl) + dev_num * sizeof(*dr);
1985
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001986 dl = kzalloc(size, GFP_KERNEL);
1987 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 return -ENOMEM;
1989
1990 dr = dl->dev_req;
1991
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001992 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001993 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001994 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001995
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001996 /* When the auto-off is configured, it means the transport
1997 * is running, but in that case still indicate that the
1998 * device is actually down.
1999 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002000 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002001 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002002
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002004 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002005
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 if (++n >= dev_num)
2007 break;
2008 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002009 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
2011 dl->dev_num = n;
2012 size = sizeof(*dl) + n * sizeof(*dr);
2013
2014 err = copy_to_user(arg, dl, size);
2015 kfree(dl);
2016
2017 return err ? -EFAULT : 0;
2018}
2019
2020int hci_get_dev_info(void __user *arg)
2021{
2022 struct hci_dev *hdev;
2023 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002024 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 int err = 0;
2026
2027 if (copy_from_user(&di, arg, sizeof(di)))
2028 return -EFAULT;
2029
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002030 hdev = hci_dev_get(di.dev_id);
2031 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 return -ENODEV;
2033
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002034 /* When the auto-off is configured, it means the transport
2035 * is running, but in that case still indicate that the
2036 * device is actually down.
2037 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002038 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002039 flags = hdev->flags & ~BIT(HCI_UP);
2040 else
2041 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002042
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 strcpy(di.name, hdev->name);
2044 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002045 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002046 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002048 if (lmp_bredr_capable(hdev)) {
2049 di.acl_mtu = hdev->acl_mtu;
2050 di.acl_pkts = hdev->acl_pkts;
2051 di.sco_mtu = hdev->sco_mtu;
2052 di.sco_pkts = hdev->sco_pkts;
2053 } else {
2054 di.acl_mtu = hdev->le_mtu;
2055 di.acl_pkts = hdev->le_pkts;
2056 di.sco_mtu = 0;
2057 di.sco_pkts = 0;
2058 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 di.link_policy = hdev->link_policy;
2060 di.link_mode = hdev->link_mode;
2061
2062 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2063 memcpy(&di.features, &hdev->features, sizeof(di.features));
2064
2065 if (copy_to_user(arg, &di, sizeof(di)))
2066 err = -EFAULT;
2067
2068 hci_dev_put(hdev);
2069
2070 return err;
2071}
2072
2073/* ---- Interface to HCI drivers ---- */
2074
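/* rfkill callback: when the switch blocks the radio, mark the device
 * HCI_RFKILLED and close it unless setup or configuration is still in
 * progress. Devices in user channel mode are left alone and report
 * -EBUSY instead.
 */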
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002075static int hci_rfkill_set_block(void *data, bool blocked)
2076{
2077 struct hci_dev *hdev = data;
2078
2079 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2080
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002081 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002082 return -EBUSY;
2083
Johan Hedberg5e130362013-09-13 08:58:17 +03002084 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002085 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002086 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2087 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002088 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002089 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002090 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002091 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002092
2093 return 0;
2094}
2095
2096static const struct rfkill_ops hci_rfkill_ops = {
2097 .set_block = hci_rfkill_set_block,
2098};
2099
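/* Deferred power-on work, queued for example when mgmt requests Set
 * Powered. Opens the device and then re-checks the error conditions
 * (rfkill, missing address, unconfigured state) that are deliberately
 * ignored during setup, powering the device back off if any of them
 * still applies.
 */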
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002100static void hci_power_on(struct work_struct *work)
2101{
2102 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002103 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002104
2105 BT_DBG("%s", hdev->name);
2106
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002107 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002108 if (err < 0) {
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302109 hci_dev_lock(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002110 mgmt_set_powered_failed(hdev, err);
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302111 hci_dev_unlock(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002112 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002113 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002114
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002115 /* During the HCI setup phase, a few error conditions are
2116 * ignored and they need to be checked now. If they are still
2117 * valid, it is important to turn the device back off.
2118 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002119 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2120 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002121 (hdev->dev_type == HCI_BREDR &&
2122 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2123 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002124 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
Johan Hedbergbf543032013-09-13 08:58:18 +03002125 hci_dev_do_close(hdev);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002126 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002127 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2128 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002129 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002130
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002131 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002132 /* For unconfigured devices, set the HCI_RAW flag
2133 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002134 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002135 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann4a964402014-07-02 19:10:33 +02002136 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002137
2138 /* For fully configured devices, this will send
2139 * the Index Added event. For unconfigured devices,
2140 * it will send an Unconfigured Index Added event.
2141 *
2142 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2143 * and no event will be sent.
2144 */
Johan Hedberg744cf192011-11-08 20:40:14 +02002145 mgmt_index_added(hdev);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07002146 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002147 /* Now that the controller is configured, it is
2148 * important to clear the HCI_RAW flag.
2149 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002150 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002151 clear_bit(HCI_RAW, &hdev->flags);
2152
Marcel Holtmannd603b762014-07-06 12:11:14 +02002153 /* Powering on the controller with HCI_CONFIG set only
2154 * happens with the transition from unconfigured to
2155 * configured. This will send the Index Added event.
2156 */
2157 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002158 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002159}
2160
2161static void hci_power_off(struct work_struct *work)
2162{
Johan Hedberg32435532011-11-07 22:16:04 +02002163 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002164 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002165
2166 BT_DBG("%s", hdev->name);
2167
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002168 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002169}
2170
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002171static void hci_error_reset(struct work_struct *work)
2172{
2173 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2174
2175 BT_DBG("%s", hdev->name);
2176
2177 if (hdev->hw_error)
2178 hdev->hw_error(hdev, hdev->hw_error_code);
2179 else
2180 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2181 hdev->hw_error_code);
2182
2183 if (hci_dev_do_close(hdev))
2184 return;
2185
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002186 hci_dev_do_open(hdev);
2187}
2188
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002189static void hci_discov_off(struct work_struct *work)
2190{
2191 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002192
2193 hdev = container_of(work, struct hci_dev, discov_off.work);
2194
2195 BT_DBG("%s", hdev->name);
2196
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002197 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002198}
2199
Johan Hedberg35f74982014-02-18 17:14:32 +02002200void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002201{
Johan Hedberg48210022013-01-27 00:31:28 +02002202 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002203
Johan Hedberg48210022013-01-27 00:31:28 +02002204 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2205 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002206 kfree(uuid);
2207 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002208}
2209
Johan Hedberg35f74982014-02-18 17:14:32 +02002210void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002211{
Johan Hedberg0378b592014-11-19 15:22:22 +02002212 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002213
Johan Hedberg0378b592014-11-19 15:22:22 +02002214 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2215 list_del_rcu(&key->list);
2216 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002217 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002218}
2219
Johan Hedberg35f74982014-02-18 17:14:32 +02002220void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002221{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002222 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002223
Johan Hedberg970d0f12014-11-13 14:37:47 +02002224 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2225 list_del_rcu(&k->list);
2226 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002227 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002228}
2229
Johan Hedberg970c4e42014-02-18 10:19:33 +02002230void hci_smp_irks_clear(struct hci_dev *hdev)
2231{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002232 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002233
Johan Hedbergadae20c2014-11-13 14:37:48 +02002234 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2235 list_del_rcu(&k->list);
2236 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002237 }
2238}
2239
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002240struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2241{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002242 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002243
Johan Hedberg0378b592014-11-19 15:22:22 +02002244 rcu_read_lock();
2245 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2246 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2247 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002248 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002249 }
2250 }
2251 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002252
2253 return NULL;
2254}
2255
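/* Decide whether a new BR/EDR link key should be stored persistently.
 * Legacy keys and keys derived from an LE link are kept; debug keys
 * and changed combination keys without a predecessor are not;
 * otherwise the decision follows the bonding requirements of both
 * sides.
 */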
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302256static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002257 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002258{
2259 /* Legacy key */
2260 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302261 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002262
2263 /* Debug keys are insecure so don't store them persistently */
2264 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302265 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002266
2267 /* Changed combination key and there's no previous one */
2268 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302269 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002270
2271 /* Security mode 3 case */
2272 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302273 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002274
Johan Hedberge3befab2014-06-01 16:33:39 +03002275 /* BR/EDR key derived using SC from an LE link */
2276 if (conn->type == LE_LINK)
2277 return true;
2278
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002279 /* Neither the local nor the remote side had no-bonding as a requirement */
2280 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302281 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002282
2283 /* Local side had dedicated bonding as requirement */
2284 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302285 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002286
2287 /* Remote side had dedicated bonding as requirement */
2288 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302289 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002290
2291 /* If none of the above criteria match, then don't store the key
2292 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302293 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002294}
2295
Johan Hedberge804d252014-07-16 11:42:28 +03002296static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002297{
Johan Hedberge804d252014-07-16 11:42:28 +03002298 if (type == SMP_LTK)
2299 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002300
Johan Hedberge804d252014-07-16 11:42:28 +03002301 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002302}
2303
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002304struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2305 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002306{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002307 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002308
Johan Hedberg970d0f12014-11-13 14:37:47 +02002309 rcu_read_lock();
2310 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002311 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2312 continue;
2313
Johan Hedberg923e2412014-12-03 12:43:39 +02002314 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002315 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002316 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002317 }
2318 }
2319 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002320
2321 return NULL;
2322}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002323
Johan Hedberg970c4e42014-02-18 10:19:33 +02002324struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2325{
2326 struct smp_irk *irk;
2327
Johan Hedbergadae20c2014-11-13 14:37:48 +02002328 rcu_read_lock();
2329 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2330 if (!bacmp(&irk->rpa, rpa)) {
2331 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002332 return irk;
2333 }
2334 }
2335
Johan Hedbergadae20c2014-11-13 14:37:48 +02002336 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2337 if (smp_irk_matches(hdev, irk->val, rpa)) {
2338 bacpy(&irk->rpa, rpa);
2339 rcu_read_unlock();
2340 return irk;
2341 }
2342 }
2343 rcu_read_unlock();
2344
Johan Hedberg970c4e42014-02-18 10:19:33 +02002345 return NULL;
2346}
2347
2348struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2349 u8 addr_type)
2350{
2351 struct smp_irk *irk;
2352
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002353 /* Identity Address must be public or static random */
2354 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2355 return NULL;
2356
Johan Hedbergadae20c2014-11-13 14:37:48 +02002357 rcu_read_lock();
2358 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002359 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002360 bacmp(bdaddr, &irk->bdaddr) == 0) {
2361 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002362 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002363 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002364 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002365 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002366
2367 return NULL;
2368}
2369
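/* Insert or update a BR/EDR link key. A changed combination key for
 * legacy pairing without a previous key is downgraded to a plain
 * combination key. If @persistent is non-NULL, it is set to whether
 * the key should survive a power cycle (see hci_persistent_key()).
 */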
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002370struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002371 bdaddr_t *bdaddr, u8 *val, u8 type,
2372 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002373{
2374 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302375 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002376
2377 old_key = hci_find_link_key(hdev, bdaddr);
2378 if (old_key) {
2379 old_key_type = old_key->type;
2380 key = old_key;
2381 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002382 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002383 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002384 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002385 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02002386 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002387 }
2388
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002389 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002390
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002391 /* Some buggy controller combinations generate a changed
2392 * combination key for legacy pairing even when there's no
2393 * previous key */
2394 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002395 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002396 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002397 if (conn)
2398 conn->key_type = type;
2399 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002400
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002401 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002402 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002403 key->pin_len = pin_len;
2404
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002405 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002406 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002407 else
2408 key->type = type;
2409
Johan Hedberg7652ff62014-06-24 13:15:49 +03002410 if (persistent)
2411 *persistent = hci_persistent_key(hdev, conn, type,
2412 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002413
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002414 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002415}
2416
Johan Hedbergca9142b2014-02-19 14:57:44 +02002417struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002418 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002419 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002420{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002421 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002422 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002423
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002424 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002425 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002426 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002427 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002428 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002429 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002430 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002431 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002432 }
2433
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002434 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002435 key->bdaddr_type = addr_type;
2436 memcpy(key->val, tk, sizeof(key->val));
2437 key->authenticated = authenticated;
2438 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002439 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002440 key->enc_size = enc_size;
2441 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002442
Johan Hedbergca9142b2014-02-19 14:57:44 +02002443 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002444}
2445
Johan Hedbergca9142b2014-02-19 14:57:44 +02002446struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2447 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002448{
2449 struct smp_irk *irk;
2450
2451 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2452 if (!irk) {
2453 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2454 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002455 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002456
2457 bacpy(&irk->bdaddr, bdaddr);
2458 irk->addr_type = addr_type;
2459
Johan Hedbergadae20c2014-11-13 14:37:48 +02002460 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002461 }
2462
2463 memcpy(irk->val, val, 16);
2464 bacpy(&irk->rpa, rpa);
2465
Johan Hedbergca9142b2014-02-19 14:57:44 +02002466 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002467}
2468
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002469int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2470{
2471 struct link_key *key;
2472
2473 key = hci_find_link_key(hdev, bdaddr);
2474 if (!key)
2475 return -ENOENT;
2476
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002477 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002478
Johan Hedberg0378b592014-11-19 15:22:22 +02002479 list_del_rcu(&key->list);
2480 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002481
2482 return 0;
2483}
2484
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

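/* Check whether a bond exists with the given remote address. For BR/EDR
 * this is a plain link-key lookup. For LE, the address is first resolved
 * through the IRK store (so a resolvable private address is mapped back
 * to its identity address) and the long-term key list is then searched
 * under RCU.
 */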
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

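/* Remote OOB data (hashes and randomizers received out-of-band during
 * pairing) lives on the plain hdev->remote_oob_data list. As with the
 * other hdev lists, callers are expected to hold hdev->lock; that is an
 * assumption about the callers, not something enforced here.
 */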
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

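/* Usage sketch for the bdaddr_list helpers below (illustrative only;
 * return-value handling is abbreviated):
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		...already present, not an error for most callers...
 *
 *	if (hci_bdaddr_list_lookup(&hdev->whitelist, &bdaddr, BDADDR_BREDR))
 *		...entry exists...
 *
 *	err = hci_bdaddr_list_del(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 */
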
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

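/* LE connection parameters are kept per identity address on
 * hdev->le_conn_params. Entries are additionally linked through their
 * action member into the pend_le_conns or pend_le_reports lists while a
 * background connection attempt or advertising report is wanted, which
 * is what hci_pend_le_action_lookup() below walks.
 */
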
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

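/* Illustrative use of hci_conn_params_add() (sketch; assumes the caller
 * already holds hdev->lock and wants an always-reconnect policy):
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -EIO;
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	hci_update_background_scan(hdev);
 */
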
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

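/* LE scan lifecycle: when discovery starts, le_scan_disable is queued to
 * fire after the scan duration. Its completion handler below either
 * stops discovery (LE-only) or, for interleaved discovery on controllers
 * without the simultaneous-discovery quirk, flushes the inquiry cache
 * and issues a BR/EDR inquiry whose completion is handled by
 * inquiry_complete() above.
 */
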
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags))
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

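/* Typical driver-side registration sequence (sketch; the foo_* callbacks
 * and the HCI_USB bus type are illustrative, not requirements; only the
 * open, close and send callbacks are mandatory, as hci_register_dev()
 * below checks):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send = foo_send;
 *	hci_set_drvdata(hdev, foo);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
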
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

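/* hci_reset_dev() below fabricates a Hardware Error event and feeds it
 * back through hci_recv_frame(), so the core tears the stack down the
 * same way it would for a controller-reported hardware error. Transport
 * drivers can call it when the device stops responding.
 */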
/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

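/* Reassemble HCI packets from a driver that delivers arbitrary-sized
 * chunks. One sk_buff per reassembly slot is grown until the length
 * announced in the packet header has arrived, then the complete frame
 * is handed to hci_recv_frame(). Returns the number of bytes left
 * unconsumed in this chunk, or a negative error.
 */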
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

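/* Sketch of how a byte-stream transport (e.g. a UART driver) might feed
 * received data into the core; foo_recv() and its buffer handling are
 * hypothetical:
 *
 *	static void foo_recv(struct hci_dev *hdev, void *buf, int len)
 *	{
 *		int err = hci_recv_stream_fragment(hdev, buf, len);
 *		if (err < 0)
 *			BT_ERR("Frame reassembly failed (%d)", err);
 *	}
 */
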
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

bool hci_req_pending(struct hci_dev *hdev)
{
	return (hdev->req_status == HCI_REQ_PEND);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req_start = 1;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

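/* Example (illustrative): queueing a parameterless HCI_Reset command:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * Commands with parameters pass a command-parameter struct and its size
 * instead of 0 and NULL.
 */
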
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

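/* Channel-level counterpart of hci_low_sent(): scan every channel of
 * every connection of the given type and pick the channel whose
 * head-of-queue skb has the highest priority, breaking ties by the
 * lowest number of packets in flight on the owning connection.  The
 * quote is derived from the free buffer count exactly as in
 * hci_low_sent().
 */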
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

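/* Anti-starvation pass run after a scheduling round that consumed
 * buffers: any channel that sent nothing in the round (chan->sent == 0)
 * but still has data queued gets its head skb promoted to
 * HCI_PRIO_MAX - 1 so hci_chan_sent() will prefer it next time, while
 * channels that did send are simply reset for the next round.
 */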
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

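/* Number of controller buffer blocks consumed by one ACL packet under
 * block-based flow control: the payload (skb->len minus the ACL header)
 * rounded up to whole blocks of hdev->block_len bytes.
 */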
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

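/* If no TX credits are left (cnt == 0) and nothing has been sent for
 * longer than HCI_ACL_TX_TIMEOUT, assume the ACL link has stalled and
 * let hci_link_tx_to() tear down the offending connections.
 */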
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

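/* ACL scheduling for packet-based flow control: drain the highest
 * priority channels round-robin, one credit (hdev->acl_cnt) per packet,
 * until either the credits or the queued data run out.  A drop in
 * head-of-queue priority ends a channel's turn early so hci_chan_sent()
 * can re-evaluate who goes next.
 */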
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

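/* ACL scheduling for block-based flow control: same structure as
 * hci_sched_acl_pkt(), but credits are buffer blocks rather than
 * packets, so every skb is charged __get_blocks() and a packet that
 * does not fit in the remaining blocks ends the whole round.
 */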
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

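/* ACL scheduling entry point: skip the link types this controller
 * cannot carry, then dispatch on the flow control mode it advertised.
 */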
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
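/* Send up to the quote computed by hci_low_sent() from each SCO
 * connection's queue while hdev->sco_cnt reports free buffers;
 * conn->sent is a wrapping counter that is reset once it reaches ~0.
 */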
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

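/* eSCO uses the same scheme as SCO and draws on the same sco_cnt
 * buffer pool; only the link type passed to hci_low_sent() differs.
 */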
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

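/* LE scheduling mirrors the ACL packet path, except that controllers
 * without a dedicated LE buffer pool (le_pkts == 0) borrow ACL credits,
 * so the consumed count is written back to whichever pool it came from.
 */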
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

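/* TX work: run the per-link-type schedulers unless the device is in
 * user channel mode (where a userspace process owns the device and
 * kernel scheduling must stay out of the way), then flush raw packets,
 * which bypass the flow control accounting above.
 */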
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

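/* A request is a sequence of commands queued back-to-back on cmd_q,
 * with the first command of each request tagged via req_start.  The
 * current request is therefore complete when the queue is empty or
 * when the next queued command starts a new request.
 */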
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req_start;
}

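/* Re-queue a clone of the last sent command at the head of cmd_q so
 * the command work transmits it again.  Used when a completion event
 * for that command can no longer be expected; a pending HCI_OP_RESET
 * is deliberately never resent.
 */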
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

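/* Called from event processing for every command complete or command
 * status event: decide whether the request the completed command
 * belongs to has finished, and if so run its completion callback
 * exactly once and drop any of its commands still sitting on cmd_q
 * (relevant when a command fails part-way through a request).
 */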
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req_complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req_complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req_start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
}

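/* RX work: every dequeued packet is first mirrored to the monitor
 * socket (and to raw sockets in promiscuous mode), then either handed
 * to userspace exclusively (user channel), dropped (data packets
 * while HCI_INIT is set), or dispatched by packet type.
 */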
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

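/* Command work: transmit the next queued command whenever the
 * controller advertises a free command slot (cmd_cnt), keep a clone in
 * hdev->sent_cmd for completion matching, and arm the command timer
 * that catches controllers which never respond.
 */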
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}