/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
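
/* Illustrative sketch, not part of the original file: the entry created
 * from dut_mode_fops lives under the controller's debugfs directory,
 * which for the first controller is typically
 * /sys/kernel/debug/bluetooth/hci0 (path assumed here). Assuming that
 * path and <fcntl.h>/<unistd.h>, user space could toggle Device Under
 * Test mode like this:
 *
 *	int fd = open("/sys/kernel/debug/bluetooth/hci0/dut_mode", O_WRONLY);
 *	if (fd >= 0) {
 *		if (write(fd, "Y", 1) < 0)	(value is parsed by strtobool())
 *			perror("dut_mode");	(-ENETDOWN if the device is not up)
 *		close(fd);
 *	}
 */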

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
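
/* Usage sketch, mirroring the dut_mode_write() caller above: a driver
 * holding a valid hdev can issue a synchronous command and inspect the
 * Command Complete status byte. hci_req_lock() serializes against other
 * synchronous requests, as the callers in this file do.
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);	(first byte is the status)
 *	kfree_skb(skb);
 */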

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
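
/* Usage sketch: a caller supplies a request builder that queues commands
 * with hci_req_add(), and hci_req_sync() runs it synchronously.
 * hci_scan_req() later in this file is a real builder; a hypothetical
 * one-command request would look like:
 *
 *	static void my_scan_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_req_sync(hdev, my_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 *
 * hci_req_sync() fails with -ENETDOWN if the device is not up; a request
 * that never completes surfaces as -ETIMEDOUT from __hci_req_sync().
 */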

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
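
/* A note on the mask layout (sketch, not from the original file): the
 * Set Event Mask parameter is little-endian, so event mask bit N from the
 * Core specification lands in byte N / 8, bit N % 8 of the array above.
 * A hypothetical helper making that explicit:
 *
 *	static inline void hci_events_set_bit(u8 events[8], unsigned int n)
 *	{
 *		events[n >> 3] |= 1 << (n & 7);
 *	}
 *
 * For example, "Inquiry Result with RSSI" is event mask bit 33, which is
 * exactly the events[4] |= 0x02 assignment above.
 */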

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If the Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If the Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
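
/* The hdev->commands[] checks above index the bit mask returned by the
 * Read Local Supported Commands command: octet N, bit M of that mask is
 * tested as hdev->commands[N] & (1 << M). A hypothetical helper (sketch,
 * not part of the file) spelling that out:
 *
 *	static inline bool hci_cmd_supported(struct hci_dev *hdev,
 *					     unsigned int octet,
 *					     unsigned int bit)
 *	{
 *		return hdev->commands[octet] & (1 << bit);
 *	}
 *
 * e.g. hdev->commands[34] & 0x02 above is octet 34, bit 1 (LE Read Local
 * P-256 Public Key).
 */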

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev)) {
		hci_debugfs_create_le(hdev);
		smp_register(hdev);
	}

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
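
/* Usage sketch, mirroring hci_inquiry() below: every successful
 * hci_dev_get() must be balanced with hci_dev_put() once the caller is
 * done with the reference.
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	... use hdev ...
 *	hci_dev_put(hdev);
 */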

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
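
/* Sketch of how a caller might consume the returned flags (the real
 * consumer is the event processing code that forwards discovery results
 * to mgmt; this fragment is illustrative only):
 *
 *	u32 flags = hci_inquiry_cache_update(hdev, &data, name_known);
 *
 *	if (flags & MGMT_DEV_FOUND_CONFIRM_NAME)
 *		... the name still needs resolving, or the cache insert
 *		    failed ...
 *	if (flags & MGMT_DEV_FOUND_LEGACY_PAIRING)
 *		... the remote side did not use Secure Simple Pairing ...
 */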

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1275
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until the Inquiry procedure finishes (HCI_INQUIRY
		 * flag is cleared). If it is interrupted by a signal,
		 * return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
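
/* Userspace reaches hci_inquiry() through the HCIINQUIRY ioctl on a raw
 * HCI socket. A minimal sketch of a caller (illustrative only; error
 * handling trimmed). The buffer holds the struct hci_inquiry_req header
 * followed by the inquiry_info array copied back by the kernel, and on
 * return ir->num_rsp carries the number of cached responses:
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int do_inquiry(int dev_id)
 *	{
 *		uint8_t lap[3] = { 0x33, 0x8b, 0x9e };	// GIAC
 *		struct hci_inquiry_req *ir;
 *		int dd;
 *
 *		dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *		ir = malloc(sizeof(*ir) + 255 * sizeof(struct inquiry_info));
 *		memset(ir, 0, sizeof(*ir));
 *		ir->dev_id  = dev_id;
 *		ir->flags   = IREQ_CACHE_FLUSH;
 *		ir->length  = 8;	// 8 * 1.28s inquiry
 *		ir->num_rsp = 255;
 *		memcpy(ir->lap, lap, 3);
 *
 *		return ioctl(dd, HCIINQUIRY, (unsigned long) ir);
 *	}
 */
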
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
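
/* For reference, a transport driver hooks into the setup stage above
 * roughly like this (hypothetical example_* driver, shown only to
 * illustrate the quirk and set_bdaddr interplay; not taken from any
 * real driver):
 *
 *	static int example_setup(struct hci_dev *hdev)
 *	{
 *		// Firmware shipped without a valid public address, so
 *		// force the unconfigured state until userspace programs
 *		// one via the set_bdaddr callback.
 *		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
 *		return 0;
 *	}
 *
 *	hdev->setup      = example_setup;
 *	hdev->set_bdaddr = example_set_bdaddr;
 *
 * With HCI_QUIRK_INVALID_BDADDR set, hci_dev_do_open() runs only
 * __hci_unconf_init() and the controller surfaces as unconfigured
 * until a valid public address has been programmed.
 */
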
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
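
/* hci_dev_open() is what the HCIDEVUP ioctl ends up calling. From
 * userspace the legacy power-on therefore looks like this (sketch,
 * error handling omitted):
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(dd, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 *
 * which is the path "hciconfig hci0 up" takes, and the reason the
 * HCI_BONDABLE bit above gets set for such legacy users.
 */
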
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
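
/* The scan parameter mirrors the Write Scan Enable values from the
 * core specification: SCAN_DISABLED (0x00), SCAN_INQUIRY (0x01),
 * SCAN_PAGE (0x02) or SCAN_PAGE | SCAN_INQUIRY (0x03). For example,
 * "hciconfig hci0 piscan" issues HCISETSCAN with dev_opt set to
 * SCAN_PAGE | SCAN_INQUIRY, which above translates into both
 * HCI_CONNECTABLE and HCI_DISCOVERABLE being set.
 */
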
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
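
/* All of the commands above share the same struct hci_dev_req calling
 * convention. A userspace sketch for HCISETAUTH (illustrative only;
 * dd is a raw HCI socket as in the hci_inquiry() example further up):
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = dev_id;
 *	dr.dev_opt = AUTH_ENABLED;
 *
 *	if (ioctl(dd, HCISETAUTH, (unsigned long) &dr) < 0)
 *		perror("HCISETAUTH");
 */
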
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
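
/* A sketch of the matching userspace side of HCIGETDEVLIST; this is
 * essentially how "hciconfig" enumerates controllers (illustrative,
 * error handling omitted):
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i;
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(dd, HCIGETDEVLIST, (void *) dl) < 0)
 *		perror("HCIGETDEVLIST");
 *
 *	for (i = 0; i < dl->dev_num; i++)
 *		printf("hci%d flags 0x%lx\n", dr[i].dev_id,
 *		       (unsigned long) dr[i].dev_opt);
 */
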
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
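
/* These ops get wired up during hci_register_dev(), which allocates
 * and registers the rfkill switch along the lines of:
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *	if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *		rfkill_destroy(hdev->rfkill);
 *		hdev->rfkill = NULL;
 *	}
 *
 * so a soft block from the rfkill subsystem lands in
 * hci_rfkill_set_block() above and closes the device.
 */
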
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}
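
/* The magic auth_type numbers above map to the MITM/bonding requirement
 * values from the core specification, also available as constants in
 * hci.h: 0x00/0x01 are HCI_AT_NO_BONDING(_MITM), 0x02/0x03 are
 * HCI_AT_DEDICATED_BONDING(_MITM) and 0x04/0x05 are
 * HCI_AT_GENERAL_BONDING(_MITM). So "> 0x01" reads as "some form of
 * bonding was requested" and "== 0x02 || == 0x03" as "dedicated
 * bonding".
 */
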
static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
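
/* Resolving an RPA is a hash check, not a lookup by value: the upper
 * 24 bits of the address carry prand and the lower 24 bits carry
 * ah(irk, prand), so smp_irk_matches() has to recompute the hash with
 * each stored IRK until one fits. The first pass above is purely an
 * optimization that reuses the last successfully resolved RPA cached
 * in irk->rpa.
 */
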
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
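
/* The timer itself is armed each time a command is handed to the
 * driver, roughly:
 *
 *	schedule_delayed_work(&hdev->cmd_timer, HCI_CMD_TIMEOUT);
 *
 * and cancelled when the matching Command Complete/Status event comes
 * back. If it ever fires, the controller swallowed a command;
 * resetting cmd_cnt to 1 above unwedges the command queue so the next
 * queued command can still be sent.
 */
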
Szymon Janc2763eda2011-03-22 13:12:22 +01002487struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002488 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002489{
2490 struct oob_data *data;
2491
Johan Hedberg6928a922014-10-26 20:46:09 +01002492 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2493 if (bacmp(bdaddr, &data->bdaddr) != 0)
2494 continue;
2495 if (data->bdaddr_type != bdaddr_type)
2496 continue;
2497 return data;
2498 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002499
2500 return NULL;
2501}
2502
Johan Hedberg6928a922014-10-26 20:46:09 +01002503int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2504 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002505{
2506 struct oob_data *data;
2507
Johan Hedberg6928a922014-10-26 20:46:09 +01002508 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002509 if (!data)
2510 return -ENOENT;
2511
Johan Hedberg6928a922014-10-26 20:46:09 +01002512 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002513
2514 list_del(&data->list);
2515 kfree(data);
2516
2517 return 0;
2518}
2519
Johan Hedberg35f74982014-02-18 17:14:32 +02002520void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002521{
2522 struct oob_data *data, *n;
2523
2524 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2525 list_del(&data->list);
2526 kfree(data);
2527 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002528}
2529
Marcel Holtmann07988722014-01-10 02:07:29 -08002530int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg6928a922014-10-26 20:46:09 +01002531 u8 bdaddr_type, u8 *hash192, u8 *rand192,
Johan Hedberg81328d52014-10-26 20:33:47 +01002532 u8 *hash256, u8 *rand256)
Szymon Janc2763eda2011-03-22 13:12:22 +01002533{
2534 struct oob_data *data;
2535
Johan Hedberg6928a922014-10-26 20:46:09 +01002536 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002537 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002538 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01002539 if (!data)
2540 return -ENOMEM;
2541
2542 bacpy(&data->bdaddr, bdaddr);
Johan Hedberg6928a922014-10-26 20:46:09 +01002543 data->bdaddr_type = bdaddr_type;
Szymon Janc2763eda2011-03-22 13:12:22 +01002544 list_add(&data->list, &hdev->remote_oob_data);
2545 }
2546
Johan Hedberg81328d52014-10-26 20:33:47 +01002547 if (hash192 && rand192) {
2548 memcpy(data->hash192, hash192, sizeof(data->hash192));
2549 memcpy(data->rand192, rand192, sizeof(data->rand192));
2550 } else {
2551 memset(data->hash192, 0, sizeof(data->hash192));
2552 memset(data->rand192, 0, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08002553 }
2554
Johan Hedberg81328d52014-10-26 20:33:47 +01002555 if (hash256 && rand256) {
2556 memcpy(data->hash256, hash256, sizeof(data->hash256));
2557 memcpy(data->rand256, rand256, sizeof(data->rand256));
2558 } else {
2559 memset(data->hash256, 0, sizeof(data->hash256));
2560 memset(data->rand256, 0, sizeof(data->rand256));
2561 }
Marcel Holtmann07988722014-01-10 02:07:29 -08002562
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002563 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002564
2565 return 0;
2566}
2567
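/* Usage sketch (hypothetical caller, e.g. the mgmt layer) storing
 * P-192-only pairing data; the NULL hash256/rand256 pair is zeroed
 * out by the fallback branches above:
 *
 *	err = hci_add_remote_oob_data(hdev, &bdaddr, BDADDR_BREDR,
 *				      hash192, rand192, NULL, NULL);
 */
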
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002568struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002569 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002570{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002571 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002572
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002573 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002574 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002575 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002576 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002577
2578 return NULL;
2579}
2580
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002581void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002582{
2583 struct list_head *p, *n;
2584
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002585 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002586 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002587
2588 list_del(p);
2589 kfree(b);
2590 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002591}
2592
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002593int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002594{
2595 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002596
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002597 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002598 return -EBADF;
2599
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002600 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002601 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002602
Johan Hedberg27f70f32014-07-21 10:50:06 +03002603 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002604 if (!entry)
2605 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002606
2607 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002608 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002609
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002610 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002611
2612 return 0;
2613}
2614
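/* Example flow for the generic bdaddr list helpers above, as used for
 * hdev->whitelist and hdev->le_white_list (sketch, assuming an
 * address other than BDADDR_ANY):
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr,
 *				  BDADDR_BREDR);
 *	if (err && err != -EEXIST)
 *		return err;
 */
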
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002615int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002616{
2617 struct bdaddr_list *entry;
2618
Johan Hedberg35f74982014-02-18 17:14:32 +02002619 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002620 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002621 return 0;
2622 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002623
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002624 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002625 if (!entry)
2626 return -ENOENT;
2627
2628 list_del(&entry->list);
2629 kfree(entry);
2630
2631 return 0;
2632}
2633
Andre Guedes15819a72014-02-03 13:56:18 -03002634/* This function requires the caller holds hdev->lock */
2635struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2636 bdaddr_t *addr, u8 addr_type)
2637{
2638 struct hci_conn_params *params;
2639
Johan Hedberg738f6182014-07-03 19:33:51 +03002640 /* The conn params list only contains identity addresses */
2641 if (!hci_is_identity_address(addr, addr_type))
2642 return NULL;
2643
Andre Guedes15819a72014-02-03 13:56:18 -03002644 list_for_each_entry(params, &hdev->le_conn_params, list) {
2645 if (bacmp(&params->addr, addr) == 0 &&
2646 params->addr_type == addr_type) {
2647 return params;
2648 }
2649 }
2650
2651 return NULL;
2652}
2653
2654/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002655struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2656 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002657{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002658 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002659
Johan Hedberg738f6182014-07-03 19:33:51 +03002660 /* The list only contains identity addresses */
2661 if (!hci_is_identity_address(addr, addr_type))
2662 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002663
Johan Hedberg501f8822014-07-04 12:37:26 +03002664 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002665 if (bacmp(&param->addr, addr) == 0 &&
2666 param->addr_type == addr_type)
2667 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002668 }
2669
2670 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002671}
2672
2673/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002674struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2675 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002676{
2677 struct hci_conn_params *params;
2678
Johan Hedbergc46245b2014-07-02 17:37:33 +03002679 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002680 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03002681
Andre Guedes15819a72014-02-03 13:56:18 -03002682 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002683 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002684 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002685
2686 params = kzalloc(sizeof(*params), GFP_KERNEL);
2687 if (!params) {
2688 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002689 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002690 }
2691
2692 bacpy(&params->addr, addr);
2693 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002694
2695 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002696 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002697
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002698 params->conn_min_interval = hdev->le_conn_min_interval;
2699 params->conn_max_interval = hdev->le_conn_max_interval;
2700 params->conn_latency = hdev->le_conn_latency;
2701 params->supervision_timeout = hdev->le_supv_timeout;
2702 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2703
2704 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2705
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002706 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002707}
2708
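/* Typical caller pattern (sketch; hdev->lock held as required): mgmt
 * code creates or reuses the entry and then sets the autoconnect
 * policy:
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */
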
Johan Hedbergf6c63242014-08-15 21:06:59 +03002709static void hci_conn_params_free(struct hci_conn_params *params)
2710{
2711 if (params->conn) {
2712 hci_conn_drop(params->conn);
2713 hci_conn_put(params->conn);
2714 }
2715
2716 list_del(&params->action);
2717 list_del(&params->list);
2718 kfree(params);
2719}
2720
Andre Guedes15819a72014-02-03 13:56:18 -03002721/* This function requires the caller holds hdev->lock */
2722void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2723{
2724 struct hci_conn_params *params;
2725
2726 params = hci_conn_params_lookup(hdev, addr, addr_type);
2727 if (!params)
2728 return;
2729
Johan Hedbergf6c63242014-08-15 21:06:59 +03002730 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002731
Johan Hedberg95305ba2014-07-04 12:37:21 +03002732 hci_update_background_scan(hdev);
2733
Andre Guedes15819a72014-02-03 13:56:18 -03002734 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2735}
2736
2737/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03002738void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002739{
2740 struct hci_conn_params *params, *tmp;
2741
2742 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03002743 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2744 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03002745 list_del(&params->list);
2746 kfree(params);
2747 }
2748
Johan Hedberg55af49a2014-07-02 17:37:26 +03002749 BT_DBG("All disabled LE connection parameters were removed");
2750}
2751
2752/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03002753void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002754{
2755 struct hci_conn_params *params, *tmp;
2756
Johan Hedbergf6c63242014-08-15 21:06:59 +03002757 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2758 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002759
Johan Hedberga2f41a82014-07-04 12:37:19 +03002760 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02002761
Andre Guedes15819a72014-02-03 13:56:18 -03002762 BT_DBG("All LE connection parameters were removed");
2763}
2764
Marcel Holtmann1904a852015-01-11 13:50:44 -08002765static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002766{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002767 if (status) {
2768 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002769
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002770 hci_dev_lock(hdev);
2771 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2772 hci_dev_unlock(hdev);
2773 return;
2774 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002775}
2776
Marcel Holtmann1904a852015-01-11 13:50:44 -08002777static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2778 u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002779{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002780 /* General inquiry access code (GIAC) */
2781 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2782 struct hci_request req;
2783 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002784 int err;
2785
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002786 if (status) {
2787 BT_ERR("Failed to disable LE scanning: status %d", status);
2788 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002789 }
2790
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002791 switch (hdev->discovery.type) {
2792 case DISCOV_TYPE_LE:
2793 hci_dev_lock(hdev);
2794 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2795 hci_dev_unlock(hdev);
2796 break;
2797
2798 case DISCOV_TYPE_INTERLEAVED:
2799 hci_req_init(&req, hdev);
2800
2801 memset(&cp, 0, sizeof(cp));
2802 memcpy(&cp.lap, lap, sizeof(cp.lap));
2803 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2804 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2805
2806 hci_dev_lock(hdev);
2807
2808 hci_inquiry_cache_flush(hdev);
2809
2810 err = hci_req_run(&req, inquiry_complete);
2811 if (err) {
2812 BT_ERR("Inquiry request failed: err %d", err);
2813 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2814 }
2815
2816 hci_dev_unlock(hdev);
2817 break;
2818 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002819}
2820
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002821static void le_scan_disable_work(struct work_struct *work)
2822{
2823 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002824 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002825 struct hci_request req;
2826 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002827
2828 BT_DBG("%s", hdev->name);
2829
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002830 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002831
Andre Guedesb1efcc22014-02-26 20:21:40 -03002832 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002833
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002834 err = hci_req_run(&req, le_scan_disable_work_complete);
2835 if (err)
2836 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002837}
2838
Johan Hedberga1f4c312014-02-27 14:05:41 +02002839/* Copy the Identity Address of the controller.
2840 *
2841 * If the controller has a public BD_ADDR, then by default use that one.
2842 * If this is an LE-only controller without a public address, default to
2843 * the static random address.
2844 *
2845 * For debugging purposes it is possible to force controllers with a
2846 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002847 *
2848 * In case BR/EDR has been disabled on a dual-mode controller and
2849 * userspace has configured a static address, then that address
2850 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02002851 */
2852void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2853 u8 *bdaddr_type)
2854{
Marcel Holtmann111902f2014-06-21 04:53:17 +02002855 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002856 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2857 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2858 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02002859 bacpy(bdaddr, &hdev->static_addr);
2860 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2861 } else {
2862 bacpy(bdaddr, &hdev->bdaddr);
2863 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2864 }
2865}
2866
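/* Usage sketch (hypothetical caller building advertising or scan
 * parameters): read the identity address into locals before filling
 * in the HCI command:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 */
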
David Herrmann9be0dab2012-04-22 14:39:57 +02002867/* Alloc HCI device */
2868struct hci_dev *hci_alloc_dev(void)
2869{
2870 struct hci_dev *hdev;
2871
Johan Hedberg27f70f32014-07-21 10:50:06 +03002872 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02002873 if (!hdev)
2874 return NULL;
2875
David Herrmannb1b813d2012-04-22 14:39:58 +02002876 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2877 hdev->esco_type = (ESCO_HV1);
2878 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07002879 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2880 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02002881 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002882 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2883 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002884
David Herrmannb1b813d2012-04-22 14:39:58 +02002885 hdev->sniff_max_interval = 800;
2886 hdev->sniff_min_interval = 80;
2887
Marcel Holtmann3f959d42014-02-20 11:55:56 -08002888 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02002889 hdev->le_adv_min_interval = 0x0800;
2890 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07002891 hdev->le_scan_interval = 0x0060;
2892 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07002893 hdev->le_conn_min_interval = 0x0028;
2894 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02002895 hdev->le_conn_latency = 0x0000;
2896 hdev->le_supv_timeout = 0x002a;
Marcel Holtmanna8e1bfa2014-12-20 16:28:40 +01002897 hdev->le_def_tx_len = 0x001b;
2898 hdev->le_def_tx_time = 0x0148;
2899 hdev->le_max_tx_len = 0x001b;
2900 hdev->le_max_tx_time = 0x0148;
2901 hdev->le_max_rx_len = 0x001b;
2902 hdev->le_max_rx_time = 0x0148;
Marcel Holtmannbef64732013-10-11 08:23:19 -07002903
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002904 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01002905 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02002906 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2907 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002908
David Herrmannb1b813d2012-04-22 14:39:58 +02002909 mutex_init(&hdev->lock);
2910 mutex_init(&hdev->req_lock);
2911
2912 INIT_LIST_HEAD(&hdev->mgmt_pending);
2913 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03002914 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02002915 INIT_LIST_HEAD(&hdev->uuids);
2916 INIT_LIST_HEAD(&hdev->link_keys);
2917 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002918 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02002919 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002920 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03002921 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03002922 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03002923 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002924 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002925
2926 INIT_WORK(&hdev->rx_work, hci_rx_work);
2927 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2928 INIT_WORK(&hdev->tx_work, hci_tx_work);
2929 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002930
David Herrmannb1b813d2012-04-22 14:39:58 +02002931 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2932 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2933 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2934
David Herrmannb1b813d2012-04-22 14:39:58 +02002935 skb_queue_head_init(&hdev->rx_q);
2936 skb_queue_head_init(&hdev->cmd_q);
2937 skb_queue_head_init(&hdev->raw_q);
2938
2939 init_waitqueue_head(&hdev->req_wait_q);
2940
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002941 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02002942
David Herrmannb1b813d2012-04-22 14:39:58 +02002943 hci_init_sysfs(hdev);
2944 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002945
2946 return hdev;
2947}
2948EXPORT_SYMBOL(hci_alloc_dev);
2949
2950/* Free HCI device */
2951void hci_free_dev(struct hci_dev *hdev)
2952{
David Herrmann9be0dab2012-04-22 14:39:57 +02002953 /* will be freed via device release */
2954 put_device(&hdev->dev);
2955}
2956EXPORT_SYMBOL(hci_free_dev);
2957
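/* Driver-side sketch (hypothetical probe path) showing the expected
 * pairing of hci_alloc_dev()/hci_free_dev() with hci_register_dev()
 * below; my_open/my_close/my_send stand in for real transport
 * callbacks:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
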
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958/* Register HCI device */
2959int hci_register_dev(struct hci_dev *hdev)
2960{
David Herrmannb1b813d2012-04-22 14:39:58 +02002961 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962
Marcel Holtmann74292d52014-07-06 15:50:27 +02002963 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 return -EINVAL;
2965
Mat Martineau08add512011-11-02 16:18:36 -07002966 /* Do not allow HCI_AMP devices to register at index 0,
2967 * so the index can be used as the AMP controller ID.
2968 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002969 switch (hdev->dev_type) {
2970 case HCI_BREDR:
2971 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2972 break;
2973 case HCI_AMP:
2974 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2975 break;
2976 default:
2977 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002979
Sasha Levin3df92b32012-05-27 22:36:56 +02002980 if (id < 0)
2981 return id;
2982
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983 sprintf(hdev->name, "hci%d", id);
2984 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002985
2986 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2987
Kees Cookd8537542013-07-03 15:04:57 -07002988 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2989 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002990 if (!hdev->workqueue) {
2991 error = -ENOMEM;
2992 goto err;
2993 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002994
Kees Cookd8537542013-07-03 15:04:57 -07002995 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2996 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002997 if (!hdev->req_workqueue) {
2998 destroy_workqueue(hdev->workqueue);
2999 error = -ENOMEM;
3000 goto err;
3001 }
3002
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003003 if (!IS_ERR_OR_NULL(bt_debugfs))
3004 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3005
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003006 dev_set_name(&hdev->dev, "%s", hdev->name);
3007
3008 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003009 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003010 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003012 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003013 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3014 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003015 if (hdev->rfkill) {
3016 if (rfkill_register(hdev->rfkill) < 0) {
3017 rfkill_destroy(hdev->rfkill);
3018 hdev->rfkill = NULL;
3019 }
3020 }
3021
Johan Hedberg5e130362013-09-13 08:58:17 +03003022 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3023 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3024
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003025 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003026 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003027
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003028 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003029 /* Assume BR/EDR support until proven otherwise (such as
3030 * through reading supported features during init.
3031 */
3032 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3033 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003034
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003035 write_lock(&hci_dev_list_lock);
3036 list_add(&hdev->list, &hci_dev_list);
3037 write_unlock(&hci_dev_list_lock);
3038
Marcel Holtmann4a964402014-07-02 19:10:33 +02003039 /* Devices that are marked for raw-only usage are unconfigured
3040 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003041 */
3042 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02003043 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003044
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003046 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047
Johan Hedberg19202572013-01-14 22:33:51 +02003048 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003049
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003051
David Herrmann33ca9542011-10-08 14:58:49 +02003052err_wqueue:
3053 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003054 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003055err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003056 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003057
David Herrmann33ca9542011-10-08 14:58:49 +02003058 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059}
3060EXPORT_SYMBOL(hci_register_dev);
3061
3062/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003063void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064{
Sasha Levin3df92b32012-05-27 22:36:56 +02003065 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003066
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003067 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068
Johan Hovold94324962012-03-15 14:48:41 +01003069 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3070
Sasha Levin3df92b32012-05-27 22:36:56 +02003071 id = hdev->id;
3072
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003073 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003075 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076
3077 hci_dev_do_close(hdev);
3078
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303079 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003080 kfree_skb(hdev->reassembly[i]);
3081
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003082 cancel_work_sync(&hdev->power_on);
3083
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003084 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02003085 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3086 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003087 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003088 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003089 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003090 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003091
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003092 /* mgmt_index_removed should take care of emptying the
3093 * pending list.
 */
3094 BUG_ON(!list_empty(&hdev->mgmt_pending));
3095
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096 hci_notify(hdev, HCI_DEV_UNREG);
3097
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003098 if (hdev->rfkill) {
3099 rfkill_unregister(hdev->rfkill);
3100 rfkill_destroy(hdev->rfkill);
3101 }
3102
Johan Hedberg711eafe2014-08-08 09:32:52 +03003103 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02003104
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003105 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003106
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003107 debugfs_remove_recursive(hdev->debugfs);
3108
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003109 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003110 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003111
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003112 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003113 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003114 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003115 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003116 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003117 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003118 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003119 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003120 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03003121 hci_conn_params_clear_all(hdev);
Marcel Holtmann22078802014-12-05 11:45:22 +01003122 hci_discovery_filter_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003123 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003124
David Herrmanndc946bd2012-01-07 15:47:24 +01003125 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003126
3127 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128}
3129EXPORT_SYMBOL(hci_unregister_dev);
3130
3131/* Suspend HCI device */
3132int hci_suspend_dev(struct hci_dev *hdev)
3133{
3134 hci_notify(hdev, HCI_DEV_SUSPEND);
3135 return 0;
3136}
3137EXPORT_SYMBOL(hci_suspend_dev);
3138
3139/* Resume HCI device */
3140int hci_resume_dev(struct hci_dev *hdev)
3141{
3142 hci_notify(hdev, HCI_DEV_RESUME);
3143 return 0;
3144}
3145EXPORT_SYMBOL(hci_resume_dev);
3146
Marcel Holtmann75e05692014-11-02 08:15:38 +01003147/* Reset HCI device */
3148int hci_reset_dev(struct hci_dev *hdev)
3149{
3150 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3151 struct sk_buff *skb;
3152
3153 skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
3154 if (!skb)
3155 return -ENOMEM;
3156
3157 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3158 memcpy(skb_put(skb, sizeof(hw_err)), hw_err, sizeof(hw_err));
3159
3160 /* Send Hardware Error to upper stack */
3161 return hci_recv_frame(hdev, skb);
3162}
3163EXPORT_SYMBOL(hci_reset_dev);
3164
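/* Sketch: a transport driver that detects wedged firmware could
 * inject the hardware error this way (hypothetical driver context):
 *
 *	if (firmware_wedged)
 *		hci_reset_dev(hdev);
 *
 * The synthesized HCI_EV_HARDWARE_ERROR event then takes the normal
 * RX path through hci_recv_frame() below.
 */
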
Marcel Holtmann76bca882009-11-18 00:40:39 +01003165/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003166int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003167{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003168 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003169 !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003170 kfree_skb(skb);
3171 return -ENXIO;
3172 }
3173
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003174 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003175 bt_cb(skb)->incoming = 1;
3176
3177 /* Time stamp */
3178 __net_timestamp(skb);
3179
Marcel Holtmann76bca882009-11-18 00:40:39 +01003180 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003181 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003182
Marcel Holtmann76bca882009-11-18 00:40:39 +01003183 return 0;
3184}
3185EXPORT_SYMBOL(hci_recv_frame);
3186
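/* Driver RX sketch (hypothetical interrupt/URB completion context):
 * the driver tags the skb with its packet type and hands it off:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 *
 * On -ENXIO the skb has already been freed by hci_recv_frame().
 */
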
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303187static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003188 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303189{
3190 int len = 0;
3191 int hlen = 0;
3192 int remain = count;
3193 struct sk_buff *skb;
3194 struct bt_skb_cb *scb;
3195
3196 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003197 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303198 return -EILSEQ;
3199
3200 skb = hdev->reassembly[index];
3201
3202 if (!skb) {
3203 switch (type) {
3204 case HCI_ACLDATA_PKT:
3205 len = HCI_MAX_FRAME_SIZE;
3206 hlen = HCI_ACL_HDR_SIZE;
3207 break;
3208 case HCI_EVENT_PKT:
3209 len = HCI_MAX_EVENT_SIZE;
3210 hlen = HCI_EVENT_HDR_SIZE;
3211 break;
3212 case HCI_SCODATA_PKT:
3213 len = HCI_MAX_SCO_SIZE;
3214 hlen = HCI_SCO_HDR_SIZE;
3215 break;
3216 }
3217
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003218 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303219 if (!skb)
3220 return -ENOMEM;
3221
3222 scb = (void *) skb->cb;
3223 scb->expect = hlen;
3224 scb->pkt_type = type;
3225
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303226 hdev->reassembly[index] = skb;
3227 }
3228
3229 while (count) {
3230 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03003231 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303232
3233 memcpy(skb_put(skb, len), data, len);
3234
3235 count -= len;
3236 data += len;
3237 scb->expect -= len;
3238 remain = count;
3239
3240 switch (type) {
3241 case HCI_EVENT_PKT:
3242 if (skb->len == HCI_EVENT_HDR_SIZE) {
3243 struct hci_event_hdr *h = hci_event_hdr(skb);
3244 scb->expect = h->plen;
3245
3246 if (skb_tailroom(skb) < scb->expect) {
3247 kfree_skb(skb);
3248 hdev->reassembly[index] = NULL;
3249 return -ENOMEM;
3250 }
3251 }
3252 break;
3253
3254 case HCI_ACLDATA_PKT:
3255 if (skb->len == HCI_ACL_HDR_SIZE) {
3256 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3257 scb->expect = __le16_to_cpu(h->dlen);
3258
3259 if (skb_tailroom(skb) < scb->expect) {
3260 kfree_skb(skb);
3261 hdev->reassembly[index] = NULL;
3262 return -ENOMEM;
3263 }
3264 }
3265 break;
3266
3267 case HCI_SCODATA_PKT:
3268 if (skb->len == HCI_SCO_HDR_SIZE) {
3269 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3270 scb->expect = h->dlen;
3271
3272 if (skb_tailroom(skb) < scb->expect) {
3273 kfree_skb(skb);
3274 hdev->reassembly[index] = NULL;
3275 return -ENOMEM;
3276 }
3277 }
3278 break;
3279 }
3280
3281 if (scb->expect == 0) {
3282 /* Complete frame */
3283
3284 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003285 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05303286
3287 hdev->reassembly[index] = NULL;
3288 return remain;
3289 }
3290 }
3291
3292 return remain;
3293}
3294
Suraj Sumangala99811512010-07-14 13:02:19 +05303295#define STREAM_REASSEMBLY 0
3296
3297int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3298{
3299 int type;
3300 int rem = 0;
3301
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03003302 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05303303 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3304
3305 if (!skb) {
3306 struct { char type; } *pkt;
3307
3308 /* Start of the frame */
3309 pkt = data;
3310 type = pkt->type;
3311
3312 data++;
3313 count--;
3314 } else {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003315 type = bt_cb(skb)->pkt_type;
 }
3316
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03003317 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003318 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05303319 if (rem < 0)
3320 return rem;
3321
3322 data += (count - rem);
3323 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00003324 }
Suraj Sumangala99811512010-07-14 13:02:19 +05303325
3326 return rem;
3327}
3328EXPORT_SYMBOL(hci_recv_stream_fragment);
3329
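/* Stream reassembly sketch: a UART-style driver can push raw byte
 * chunks of any size and let the core carve out complete frames
 * (hypothetical tty receive callback):
 *
 *	hci_recv_stream_fragment(hdev, data, count);
 *
 * When no frame is pending, the first byte of a chunk is consumed as
 * the packet-type indicator.
 */
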
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330/* ---- Interface to upper protocols ---- */
3331
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332int hci_register_cb(struct hci_cb *cb)
3333{
3334 BT_DBG("%p name %s", cb, cb->name);
3335
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003336 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003338 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339
3340 return 0;
3341}
3342EXPORT_SYMBOL(hci_register_cb);
3343
3344int hci_unregister_cb(struct hci_cb *cb)
3345{
3346 BT_DBG("%p name %s", cb, cb->name);
3347
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003348 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003350 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351
3352 return 0;
3353}
3354EXPORT_SYMBOL(hci_unregister_cb);
3355
Marcel Holtmann51086992013-10-10 14:54:19 -07003356static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003358 int err;
3359
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003360 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003362 /* Time stamp */
3363 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003365 /* Send copy to monitor */
3366 hci_send_to_monitor(hdev, skb);
3367
3368 if (atomic_read(&hdev->promisc)) {
3369 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003370 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371 }
3372
3373 /* Get rid of skb owner, prior to sending to the driver. */
3374 skb_orphan(skb);
3375
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003376 err = hdev->send(hdev, skb);
3377 if (err < 0) {
3378 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3379 kfree_skb(skb);
3380 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381}
3382
Marcel Holtmann899de762014-07-11 05:51:58 +02003383bool hci_req_pending(struct hci_dev *hdev)
3384{
3385 return (hdev->req_status == HCI_REQ_PEND);
3386}
3387
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003388/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003389int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3390 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003391{
3392 struct sk_buff *skb;
3393
3394 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3395
3396 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3397 if (!skb) {
3398 BT_ERR("%s no memory for command", hdev->name);
3399 return -ENOMEM;
3400 }
3401
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003402 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003403 * single-command requests.
3404 */
3405 bt_cb(skb)->req.start = true;
3406
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003408 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409
3410 return 0;
3411}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412
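/* Usage sketch (hypothetical caller): issuing a stand-alone reset
 * would look like
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *	if (err < 0)
 *		BT_ERR("%s reset failed (%d)", hdev->name, err);
 *
 * The skb is queued on hdev->cmd_q and sent from hci_cmd_work once
 * cmd_cnt permits.
 */
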
3413/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003414void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415{
3416 struct hci_command_hdr *hdr;
3417
3418 if (!hdev->sent_cmd)
3419 return NULL;
3420
3421 hdr = (void *) hdev->sent_cmd->data;
3422
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003423 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 return NULL;
3425
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003426 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427
3428 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3429}
3430
3431/* Send ACL data */
3432static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3433{
3434 struct hci_acl_hdr *hdr;
3435 int len = skb->len;
3436
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003437 skb_push(skb, HCI_ACL_HDR_SIZE);
3438 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003439 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003440 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3441 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442}
3443
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003444static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003445 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003447 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448 struct hci_dev *hdev = conn->hdev;
3449 struct sk_buff *list;
3450
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003451 skb->len = skb_headlen(skb);
3452 skb->data_len = 0;
3453
3454 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003455
3456 switch (hdev->dev_type) {
3457 case HCI_BREDR:
3458 hci_add_acl_hdr(skb, conn->handle, flags);
3459 break;
3460 case HCI_AMP:
3461 hci_add_acl_hdr(skb, chan->handle, flags);
3462 break;
3463 default:
3464 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3465 return;
3466 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003467
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003468 list = skb_shinfo(skb)->frag_list;
3469 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 /* Non-fragmented */
3471 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3472
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003473 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474 } else {
3475 /* Fragmented */
3476 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3477
3478 skb_shinfo(skb)->frag_list = NULL;
3479
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003480 /* Queue all fragments atomically. We need to use spin_lock_bh
3481 * here because of 6LoWPAN links, as there this function is
3482 * called from softirq and using normal spin lock could cause
3483 * deadlocks.
3484 */
3485 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003487 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003488
3489 flags &= ~ACL_START;
3490 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 do {
3492 skb = list;
 list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003493
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003494 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003495 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496
3497 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3498
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003499 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500 } while (list);
3501
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003502 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003504}
3505
3506void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3507{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003508 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003509
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003510 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003511
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003512 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003514 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516
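/* Caller sketch (assuming chan is an established hci_chan owned by
 * L2CAP): a fully built PDU is handed to the TX scheduler with
 *
 *	hci_send_acl(chan, skb, ACL_START_NO_FLUSH);
 *
 * Any frag_list attached by the caller is split into correctly
 * flagged ACL fragments by hci_queue_acl() above.
 */
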
3517/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003518void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519{
3520 struct hci_dev *hdev = conn->hdev;
3521 struct hci_sco_hdr hdr;
3522
3523 BT_DBG("%s len %d", hdev->name, skb->len);
3524
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003525 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526 hdr.dlen = skb->len;
3527
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003528 skb_push(skb, HCI_SCO_HDR_SIZE);
3529 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003530 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003532 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003533
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003535 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537
3538/* ---- HCI TX task (outgoing data) ---- */
3539
3540/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003541static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3542 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543{
3544 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003545 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003546 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003548 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549 * added and removed with the TX task disabled.
 */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003550
3551 rcu_read_lock();
3552
3553 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003554 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003556
3557 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3558 continue;
3559
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560 num++;
3561
3562 if (c->sent < min) {
3563 min = c->sent;
3564 conn = c;
3565 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003566
3567 if (hci_conn_num(hdev, type) == num)
3568 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569 }
3570
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003571 rcu_read_unlock();
3572
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003574 int cnt, q;
3575
3576 switch (conn->type) {
3577 case ACL_LINK:
3578 cnt = hdev->acl_cnt;
3579 break;
3580 case SCO_LINK:
3581 case ESCO_LINK:
3582 cnt = hdev->sco_cnt;
3583 break;
3584 case LE_LINK:
3585 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3586 break;
3587 default:
3588 cnt = 0;
3589 BT_ERR("Unknown link type");
3590 }
3591
3592 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593 *quote = q ? q : 1;
3594 } else {
3595 *quote = 0;
 }
3596
3597 BT_DBG("conn %p quote %d", conn, *quote);
3598 return conn;
3599}
3600
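/* Worked example of the quota computed above (illustrative numbers):
 * with hdev->acl_cnt == 8 free ACL slots and num == 3 busy ACL
 * connections, each round grants q = 8 / 3 = 2 packets per
 * connection; a connection that would otherwise get 0 still receives
 * the minimum quote of 1.
 */
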
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003601static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602{
3603 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003604 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605
Ville Tervobae1f5d92011-02-10 22:38:53 -03003606 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003608 rcu_read_lock();
3609
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003611 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003612 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003613 BT_ERR("%s killing stalled connection %pMR",
3614 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003615 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616 }
3617 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003618
3619 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620}
3621
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003622static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3623 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003624{
3625 struct hci_conn_hash *h = &hdev->conn_hash;
3626 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003627 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003628 struct hci_conn *conn;
3629 int cnt, q, conn_num = 0;
3630
3631 BT_DBG("%s", hdev->name);
3632
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003633 rcu_read_lock();
3634
3635 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003636 struct hci_chan *tmp;
3637
3638 if (conn->type != type)
3639 continue;
3640
3641 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3642 continue;
3643
3644 conn_num++;
3645
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003646 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003647 struct sk_buff *skb;
3648
3649 if (skb_queue_empty(&tmp->data_q))
3650 continue;
3651
3652 skb = skb_peek(&tmp->data_q);
3653 if (skb->priority < cur_prio)
3654 continue;
3655
3656 if (skb->priority > cur_prio) {
3657 num = 0;
3658 min = ~0;
3659 cur_prio = skb->priority;
3660 }
3661
3662 num++;
3663
3664 if (conn->sent < min) {
3665 min = conn->sent;
3666 chan = tmp;
3667 }
3668 }
3669
3670 if (hci_conn_num(hdev, type) == conn_num)
3671 break;
3672 }
3673
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003674 rcu_read_unlock();
3675
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003676 if (!chan)
3677 return NULL;
3678
3679 switch (chan->conn->type) {
3680 case ACL_LINK:
3681 cnt = hdev->acl_cnt;
3682 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003683 case AMP_LINK:
3684 cnt = hdev->block_cnt;
3685 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003686 case SCO_LINK:
3687 case ESCO_LINK:
3688 cnt = hdev->sco_cnt;
3689 break;
3690 case LE_LINK:
3691 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3692 break;
3693 default:
3694 cnt = 0;
3695 BT_ERR("Unknown link type");
3696 }
3697
3698 q = cnt / num;
3699 *quote = q ? q : 1;
3700 BT_DBG("chan %p quote %d", chan, *quote);
3701 return chan;
3702}
3703
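/* Selection policy sketch for hci_chan_sent() above: among channels
 * of the requested link type, only skbs at the highest priority seen
 * in this pass compete, and within that priority the connection with
 * the fewest in-flight packets (conn->sent) wins - strict priority
 * first, least loaded second.
 */
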
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003704static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3705{
3706 struct hci_conn_hash *h = &hdev->conn_hash;
3707 struct hci_conn *conn;
3708 int num = 0;
3709
3710 BT_DBG("%s", hdev->name);
3711
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003712 rcu_read_lock();
3713
3714 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003715 struct hci_chan *chan;
3716
3717 if (conn->type != type)
3718 continue;
3719
3720 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3721 continue;
3722
3723 num++;
3724
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003725 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003726 struct sk_buff *skb;
3727
3728 if (chan->sent) {
3729 chan->sent = 0;
3730 continue;
3731 }
3732
3733 if (skb_queue_empty(&chan->data_q))
3734 continue;
3735
3736 skb = skb_peek(&chan->data_q);
3737 if (skb->priority >= HCI_PRIO_MAX - 1)
3738 continue;
3739
3740 skb->priority = HCI_PRIO_MAX - 1;
3741
3742 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003743 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003744 }
3745
3746 if (hci_conn_num(hdev, type) == num)
3747 break;
3748 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003749
3750 rcu_read_unlock();
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003752}
3753
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003754static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3755{
3756 /* Calculate count of blocks used by this packet */
3757 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3758}
3759
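/* Worked example (illustrative numbers): with hdev->block_len == 64
 * and skb->len == 343 (4-byte ACL header plus 339 bytes of payload),
 * __get_blocks() returns DIV_ROUND_UP(339, 64) == 6 blocks.
 */
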
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003760static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761{
Marcel Holtmann4a964402014-07-02 19:10:33 +02003762 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763 /* ACL tx timeout must be longer than maximum
3764 * link supervision timeout (40.9 seconds).
 */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003765 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003766 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003767 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003769}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003771static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003772{
3773 unsigned int cnt = hdev->acl_cnt;
3774 struct hci_chan *chan;
3775 struct sk_buff *skb;
3776 int quote;
3777
3778 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003779
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003780 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003781 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003782 u32 priority = (skb_peek(&chan->data_q))->priority;
3783 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003784 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003785 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003786
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003787 /* Stop if priority has changed */
3788 if (skb->priority < priority)
3789 break;
3790
3791 skb = skb_dequeue(&chan->data_q);
3792
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003793 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003794 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003795
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003796 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797 hdev->acl_last_tx = jiffies;
3798
3799 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003800 chan->sent++;
3801 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802 }
3803 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003804
3805 if (cnt != hdev->acl_cnt)
3806 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807}
3808
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

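/* Entry point for ACL scheduling: dispatches to the packet-based or
 * block-based scheduler depending on the flow control mode reported by
 * the controller.
 */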
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
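/* SCO traffic has no priority queues and no TX timeout handling;
 * frames are sent for as long as the sco_cnt quota allows, and
 * conn->sent simply wraps back to zero at ~0.
 */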
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

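/* eSCO scheduling mirrors hci_sched_sco() and draws from the same
 * sco_cnt quota; only the link type differs.
 */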
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

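/* LE scheduling follows the same priority-based pattern as ACL. If the
 * controller has no dedicated LE buffers (le_pkts == 0), the quota is
 * borrowed from the ACL pool, which is why acl_cnt may be updated here.
 */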
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

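/* Worker for hdev->tx_work: runs the per-link-type schedulers unless
 * the device is claimed by a user channel, then flushes any raw
 * (unknown type) packets queued on hdev->raw_q.
 */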
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
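/* Strips the ACL header, resolves the connection handle and hands the
 * payload to L2CAP; packets for unknown handles are logged and dropped.
 */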
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
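/* Like the ACL receive path, but no flag bits are extracted from the
 * handle and the payload is handed to sco_recv_scodata().
 */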
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

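/* A request is complete when the next command in cmd_q starts a new
 * request (or the queue is empty); commands belonging to one request
 * are tracked via bt_cb(skb)->req.
 */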
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

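/* Re-queues a clone of the last sent command so it gets sent again.
 * Used to recover when a controller drops a command, e.g. after a
 * spontaneous reset; HCI_OP_RESET itself is deliberately never resent.
 */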
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

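/* Called from the event handlers when a command completes. Decides
 * whether the whole request is finished and, if so, runs its complete
 * callback exactly once and discards the remaining queued commands
 * belonging to that request.
 */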
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
}

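/* Worker for hdev->rx_work: drains rx_q, mirroring every packet to the
 * monitor and (in promiscuous mode) to raw sockets before dispatching
 * it to the event, ACL or SCO handlers.
 */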
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

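/* Worker for hdev->cmd_work: when the controller has command credits
 * (cmd_cnt), sends the next queued command, keeps a clone in
 * hdev->sent_cmd for the completion handling above, and (re)arms
 * cmd_timer; if cloning fails the command is requeued and the work
 * rescheduled.
 */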
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}