/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
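
/* Illustrative note (added for clarity, not from the original file):
 * the dut_mode entry created in __hci_init() below with these fops is
 * driven from userspace, assuming debugfs is mounted at the usual
 * location, with e.g.
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Enabling sends HCI_OP_ENABLE_DUT_MODE; the only way back out is the
 * HCI_OP_RESET issued on the 'N' path above.
 */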

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
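
/* Usage sketch (illustrative addition, not from the original file):
 * __hci_cmd_sync() is called with the request lock held, as in
 * dut_mode_write() above, and the returned skb carrying the Command
 * Complete parameters must be freed by the caller:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	status = skb->data[0];
 *	kfree_skb(skb);
 */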

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
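
/* Usage sketch (illustrative): callers pass a builder function that
 * queues one or more commands on the request, plus an opaque argument,
 * and hci_req_sync() blocks until the whole batch completes or times
 * out. For example, the HCISETSCAN ioctl path does roughly
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */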

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

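/* Note (added for clarity): the manufacturer/revision checks below
 * special-case a few older controllers that handle Inquiry Result with
 * RSSI even though their feature bits do not advertise it, so the
 * deepest usable inquiry mode is picked by hand for them.
 */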
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

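/* Note (added for clarity): initialization runs in up to four stages.
 * Stage 1 (hci_init1_req) is common to all controller types; stages
 * 2-4 only run for BR/EDR/LE controllers, and each stage relies on
 * data (features, supported commands) returned by the previous one.
 */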
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev)) {
		hci_debugfs_create_le(hdev);
		smp_register(hdev);
	}

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

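/* Usage sketch (illustrative): every successful lookup takes a
 * reference, so callers must balance it with hci_dev_put(), e.g.
 *
 *	hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */
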
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

Johan Hedberg42c6b122013-03-05 20:37:49 +02001261static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262{
1263 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001264 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 struct hci_cp_inquiry cp;
1266
1267 BT_DBG("%s", hdev->name);
1268
1269 if (test_bit(HCI_INQUIRY, &hdev->flags))
1270 return;
1271
1272 /* Start Inquiry */
1273 memcpy(&cp.lap, &ir->lap, 3);
1274 cp.length = ir->length;
1275 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001276 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277}
1278
1279int hci_inquiry(void __user *arg)
1280{
1281 __u8 __user *ptr = arg;
1282 struct hci_inquiry_req ir;
1283 struct hci_dev *hdev;
1284 int err = 0, do_inquiry = 0, max_rsp;
1285 long timeo;
1286 __u8 *buf;
1287
1288 if (copy_from_user(&ir, ptr, sizeof(ir)))
1289 return -EFAULT;
1290
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001291 hdev = hci_dev_get(ir.dev_id);
1292 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 return -ENODEV;
1294
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001295 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1296 err = -EBUSY;
1297 goto done;
1298 }
1299
Marcel Holtmann4a964402014-07-02 19:10:33 +02001300 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001301 err = -EOPNOTSUPP;
1302 goto done;
1303 }
1304
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001305 if (hdev->dev_type != HCI_BREDR) {
1306 err = -EOPNOTSUPP;
1307 goto done;
1308 }
1309
Johan Hedberg56f87902013-10-02 13:43:13 +03001310 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1311 err = -EOPNOTSUPP;
1312 goto done;
1313 }
1314
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001315 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001316 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001317 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001318 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 do_inquiry = 1;
1320 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001321 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322
Marcel Holtmann04837f62006-07-03 10:02:33 +02001323 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001324
1325 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001326 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1327 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001328 if (err < 0)
1329 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001330
1331 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1332 * cleared). If it is interrupted by a signal, return -EINTR.
1333 */
NeilBrown74316202014-07-07 15:16:04 +10001334 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001335 TASK_INTERRUPTIBLE))
1336 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001337 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001339	/* For an unlimited number of responses we will use a buffer with
1340 * 255 entries
1341 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1343
1344	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
1345	 * copy it to user space.
1346 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001347 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001348 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 err = -ENOMEM;
1350 goto done;
1351 }
1352
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001353 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001355 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356
1357 BT_DBG("num_rsp %d", ir.num_rsp);
1358
1359 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1360 ptr += sizeof(ir);
1361 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001362 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001364 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 err = -EFAULT;
1366
1367 kfree(buf);
1368
1369done:
1370 hci_dev_put(hdev);
1371 return err;
1372}
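
/* Hypothetical userspace sketch (not part of this file): driving the
 * HCIINQUIRY ioctl serviced by hci_inquiry() above. It assumes the BlueZ
 * headers <bluetooth/bluetooth.h> and <bluetooth/hci.h> and a raw HCI
 * socket from socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI). The buffer
 * layout -- request header immediately followed by inquiry_info entries --
 * mirrors the two copy_to_user() calls above.
 *
 *	int inquiry_example(int hci_sk, int dev_id)
 *	{
 *		struct {
 *			struct hci_inquiry_req ir;
 *			struct inquiry_info info[255];
 *		} req;
 *		char addr[18];
 *		int i;
 *
 *		memset(&req, 0, sizeof(req));
 *		req.ir.dev_id  = dev_id;
 *		req.ir.flags   = IREQ_CACHE_FLUSH;  force a fresh inquiry
 *		req.ir.lap[0]  = 0x33;              GIAC 0x9e8b33, LSB first
 *		req.ir.lap[1]  = 0x8b;
 *		req.ir.lap[2]  = 0x9e;
 *		req.ir.length  = 8;
 *		req.ir.num_rsp = 0;                 0 means up to 255 entries
 *
 *		if (ioctl(hci_sk, HCIINQUIRY, &req) < 0)
 *			return -1;
 *
 *		for (i = 0; i < req.ir.num_rsp; i++) {
 *			ba2str(&req.info[i].bdaddr, addr);
 *			printf("%s\n", addr);
 *		}
 *		return 0;
 *	}
 */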
1373
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001374static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 int ret = 0;
1377
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 BT_DBG("%s %p", hdev->name, hdev);
1379
1380 hci_req_lock(hdev);
1381
Johan Hovold94324962012-03-15 14:48:41 +01001382 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1383 ret = -ENODEV;
1384 goto done;
1385 }
1386
Marcel Holtmannd603b762014-07-06 12:11:14 +02001387 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1388 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001389 /* Check for rfkill but allow the HCI setup stage to
1390 * proceed (which in itself doesn't cause any RF activity).
1391 */
1392 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1393 ret = -ERFKILL;
1394 goto done;
1395 }
1396
1397 /* Check for valid public address or a configured static
1398	 * random address, but let the HCI setup proceed to
1399 * be able to determine if there is a public address
1400 * or not.
1401 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001402 * In case of user channel usage, it is not important
1403 * if a public address or static random address is
1404 * available.
1405 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001406 * This check is only valid for BR/EDR controllers
1407 * since AMP controllers do not have an address.
1408 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001409 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1410 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001411 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1412 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1413 ret = -EADDRNOTAVAIL;
1414 goto done;
1415 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001416 }
1417
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 if (test_bit(HCI_UP, &hdev->flags)) {
1419 ret = -EALREADY;
1420 goto done;
1421 }
1422
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 if (hdev->open(hdev)) {
1424 ret = -EIO;
1425 goto done;
1426 }
1427
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001428 atomic_set(&hdev->cmd_cnt, 1);
1429 set_bit(HCI_INIT, &hdev->flags);
1430
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001431 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1432 if (hdev->setup)
1433 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001434
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001435 /* The transport driver can set these quirks before
1436 * creating the HCI device or in its setup callback.
1437 *
1438 * In case any of them is set, the controller has to
1439 * start up as unconfigured.
1440 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001441 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1442 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001443 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001444
1445 /* For an unconfigured controller it is required to
1446 * read at least the version information provided by
1447 * the Read Local Version Information command.
1448 *
1449 * If the set_bdaddr driver callback is provided, then
1450 * also the original Bluetooth public device address
1451 * will be read using the Read BD Address command.
1452 */
1453 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1454 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001455 }
1456
Marcel Holtmann9713c172014-07-06 12:11:15 +02001457 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1458 /* If public address change is configured, ensure that
1459 * the address gets programmed. If the driver does not
1460 * support changing the public address, fail the power
1461 * on procedure.
1462 */
1463 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1464 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001465 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1466 else
1467 ret = -EADDRNOTAVAIL;
1468 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001469
1470 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02001471 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001472 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001473 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 }
1475
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001476 clear_bit(HCI_INIT, &hdev->flags);
1477
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 if (!ret) {
1479 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02001480 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 set_bit(HCI_UP, &hdev->flags);
1482 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001483 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02001484 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02001485 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001486 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001487 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001488 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001489 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001490 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001491 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001492 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001494 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001495 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001496 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497
1498 skb_queue_purge(&hdev->cmd_q);
1499 skb_queue_purge(&hdev->rx_q);
1500
1501 if (hdev->flush)
1502 hdev->flush(hdev);
1503
1504 if (hdev->sent_cmd) {
1505 kfree_skb(hdev->sent_cmd);
1506 hdev->sent_cmd = NULL;
1507 }
1508
1509 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001510 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 }
1512
1513done:
1514 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 return ret;
1516}
1517
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001518/* ---- HCI ioctl helpers ---- */
1519
1520int hci_dev_open(__u16 dev)
1521{
1522 struct hci_dev *hdev;
1523 int err;
1524
1525 hdev = hci_dev_get(dev);
1526 if (!hdev)
1527 return -ENODEV;
1528
Marcel Holtmann4a964402014-07-02 19:10:33 +02001529 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001530 * up as user channel. Trying to bring them up as normal devices
1531	 * will result in a failure. Only user channel operation is
1532 * possible.
1533 *
1534 * When this function is called for a user channel, the flag
1535 * HCI_USER_CHANNEL will be set first before attempting to
1536 * open the device.
1537 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02001538 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001539 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1540 err = -EOPNOTSUPP;
1541 goto done;
1542 }
1543
Johan Hedberge1d08f42013-10-01 22:44:50 +03001544 /* We need to ensure that no other power on/off work is pending
1545 * before proceeding to call hci_dev_do_open. This is
1546 * particularly important if the setup procedure has not yet
1547 * completed.
1548 */
1549 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1550 cancel_delayed_work(&hdev->power_off);
1551
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001552 /* After this call it is guaranteed that the setup procedure
1553 * has finished. This means that error conditions like RFKILL
1554 * or no valid public or static random address apply.
1555 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03001556 flush_workqueue(hdev->req_workqueue);
1557
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001558 /* For controllers not using the management interface and that
Johan Hedbergb6ae8452014-07-30 09:22:22 +03001559 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001560 * so that pairing works for them. Once the management interface
1561 * is in use this bit will be cleared again and userspace has
1562 * to explicitly enable it.
1563 */
1564 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1565 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03001566 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02001567
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001568 err = hci_dev_do_open(hdev);
1569
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001570done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001571 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001572 return err;
1573}
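
/* Hypothetical userspace sketch (not part of this file): the legacy ioctl
 * path into hci_dev_open() above. Any raw HCI socket can issue it;
 * errno EALREADY just means the device was already up.
 *
 *	int bring_up_example(int dev_id)
 *	{
 *		int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *		if (sk < 0)
 *			return -1;
 *
 *		if (ioctl(sk, HCIDEVUP, dev_id) < 0 && errno != EALREADY) {
 *			close(sk);
 *			return -1;
 *		}
 *
 *		close(sk);
 *		return 0;
 *	}
 */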
1574
Johan Hedbergd7347f32014-07-04 12:37:23 +03001575/* This function requires the caller holds hdev->lock */
1576static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1577{
1578 struct hci_conn_params *p;
1579
Johan Hedbergf161dd42014-08-15 21:06:54 +03001580 list_for_each_entry(p, &hdev->le_conn_params, list) {
1581 if (p->conn) {
1582 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001583 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001584 p->conn = NULL;
1585 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001586 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001587 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001588
1589 BT_DBG("All LE pending actions cleared");
1590}
1591
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592static int hci_dev_do_close(struct hci_dev *hdev)
1593{
1594 BT_DBG("%s %p", hdev->name, hdev);
1595
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001596 cancel_delayed_work(&hdev->power_off);
1597
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 hci_req_cancel(hdev, ENODEV);
1599 hci_req_lock(hdev);
1600
1601 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001602 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 hci_req_unlock(hdev);
1604 return 0;
1605 }
1606
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001607 /* Flush RX and TX works */
1608 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001609 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001611 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001612 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001613 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001614 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07001615 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001616 }
1617
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001618 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001619 cancel_delayed_work(&hdev->service_cache);
1620
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001621 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02001622
1623 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1624 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001625
Johan Hedberg76727c02014-11-18 09:00:14 +02001626 /* Avoid potential lockdep warnings from the *_flush() calls by
1627 * ensuring the workqueue is empty up front.
1628 */
1629 drain_workqueue(hdev->workqueue);
1630
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001631 hci_dev_lock(hdev);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001632
1633 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1634 if (hdev->dev_type == HCI_BREDR)
1635 mgmt_powered(hdev, 0);
1636 }
1637
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001638 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03001639 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001640 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001641 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
1643 hci_notify(hdev, HCI_DEV_DOWN);
1644
1645 if (hdev->flush)
1646 hdev->flush(hdev);
1647
1648 /* Reset device */
1649 skb_queue_purge(&hdev->cmd_q);
1650 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02001651 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1652 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001653 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001655 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 clear_bit(HCI_INIT, &hdev->flags);
1657 }
1658
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001659 /* flush cmd work */
1660 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661
1662 /* Drop queues */
1663 skb_queue_purge(&hdev->rx_q);
1664 skb_queue_purge(&hdev->cmd_q);
1665 skb_queue_purge(&hdev->raw_q);
1666
1667 /* Drop last sent command */
1668 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001669 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 kfree_skb(hdev->sent_cmd);
1671 hdev->sent_cmd = NULL;
1672 }
1673
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001674 kfree_skb(hdev->recv_evt);
1675 hdev->recv_evt = NULL;
1676
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 /* After this point our queues are empty
1678 * and no tasks are scheduled. */
1679 hdev->close(hdev);
1680
Johan Hedberg35b973c2013-03-15 17:06:59 -05001681 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001682 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05001683 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1684
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001685 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001686 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001687
Johan Hedberge59fda82012-02-22 18:11:53 +02001688 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001689 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001690 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02001691
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 hci_req_unlock(hdev);
1693
1694 hci_dev_put(hdev);
1695 return 0;
1696}
1697
1698int hci_dev_close(__u16 dev)
1699{
1700 struct hci_dev *hdev;
1701 int err;
1702
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001703 hdev = hci_dev_get(dev);
1704 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001706
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001707 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1708 err = -EBUSY;
1709 goto done;
1710 }
1711
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001712 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1713 cancel_delayed_work(&hdev->power_off);
1714
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001716
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001717done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 hci_dev_put(hdev);
1719 return err;
1720}
1721
1722int hci_dev_reset(__u16 dev)
1723{
1724 struct hci_dev *hdev;
1725 int ret = 0;
1726
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001727 hdev = hci_dev_get(dev);
1728 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 return -ENODEV;
1730
1731 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732
Marcel Holtmann808a0492013-08-26 20:57:58 -07001733 if (!test_bit(HCI_UP, &hdev->flags)) {
1734 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001736 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001738 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1739 ret = -EBUSY;
1740 goto done;
1741 }
1742
Marcel Holtmann4a964402014-07-02 19:10:33 +02001743 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001744 ret = -EOPNOTSUPP;
1745 goto done;
1746 }
1747
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 /* Drop queues */
1749 skb_queue_purge(&hdev->rx_q);
1750 skb_queue_purge(&hdev->cmd_q);
1751
Johan Hedberg76727c02014-11-18 09:00:14 +02001752 /* Avoid potential lockdep warnings from the *_flush() calls by
1753 * ensuring the workqueue is empty up front.
1754 */
1755 drain_workqueue(hdev->workqueue);
1756
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001757 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001758 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001760 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761
1762 if (hdev->flush)
1763 hdev->flush(hdev);
1764
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001765 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001766 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001768 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769
1770done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 hci_req_unlock(hdev);
1772 hci_dev_put(hdev);
1773 return ret;
1774}
1775
1776int hci_dev_reset_stat(__u16 dev)
1777{
1778 struct hci_dev *hdev;
1779 int ret = 0;
1780
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001781 hdev = hci_dev_get(dev);
1782 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 return -ENODEV;
1784
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001785 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1786 ret = -EBUSY;
1787 goto done;
1788 }
1789
Marcel Holtmann4a964402014-07-02 19:10:33 +02001790 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001791 ret = -EOPNOTSUPP;
1792 goto done;
1793 }
1794
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1796
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001797done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 return ret;
1800}
1801
Johan Hedberg123abc02014-07-10 12:09:07 +03001802static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1803{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001804 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001805
1806 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1807
1808 if ((scan & SCAN_PAGE))
1809 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1810 &hdev->dev_flags);
1811 else
1812 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1813 &hdev->dev_flags);
1814
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001815 if ((scan & SCAN_INQUIRY)) {
1816 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1817 &hdev->dev_flags);
1818 } else {
1819 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1820 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1821 &hdev->dev_flags);
1822 }
1823
Johan Hedberg123abc02014-07-10 12:09:07 +03001824 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1825 return;
1826
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001827 if (conn_changed || discov_changed) {
1828 /* In case this was disabled through mgmt */
1829 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1830
1831 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1832 mgmt_update_adv_data(hdev);
1833
Johan Hedberg123abc02014-07-10 12:09:07 +03001834 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001835 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001836}
1837
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838int hci_dev_cmd(unsigned int cmd, void __user *arg)
1839{
1840 struct hci_dev *hdev;
1841 struct hci_dev_req dr;
1842 int err = 0;
1843
1844 if (copy_from_user(&dr, arg, sizeof(dr)))
1845 return -EFAULT;
1846
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001847 hdev = hci_dev_get(dr.dev_id);
1848 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 return -ENODEV;
1850
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001851 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1852 err = -EBUSY;
1853 goto done;
1854 }
1855
Marcel Holtmann4a964402014-07-02 19:10:33 +02001856 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001857 err = -EOPNOTSUPP;
1858 goto done;
1859 }
1860
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001861 if (hdev->dev_type != HCI_BREDR) {
1862 err = -EOPNOTSUPP;
1863 goto done;
1864 }
1865
Johan Hedberg56f87902013-10-02 13:43:13 +03001866 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1867 err = -EOPNOTSUPP;
1868 goto done;
1869 }
1870
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 switch (cmd) {
1872 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001873 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1874 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 break;
1876
1877 case HCISETENCRYPT:
1878 if (!lmp_encrypt_capable(hdev)) {
1879 err = -EOPNOTSUPP;
1880 break;
1881 }
1882
1883 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1884 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001885 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1886 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 if (err)
1888 break;
1889 }
1890
Johan Hedberg01178cd2013-03-05 20:37:41 +02001891 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1892 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 break;
1894
1895 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001896 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1897 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03001898
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001899 /* Ensure that the connectable and discoverable states
1900 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03001901 */
Johan Hedberg123abc02014-07-10 12:09:07 +03001902 if (!err)
1903 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 break;
1905
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001906 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001907 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1908 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001909 break;
1910
1911 case HCISETLINKMODE:
1912 hdev->link_mode = ((__u16) dr.dev_opt) &
1913 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1914 break;
1915
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 case HCISETPTYPE:
1917 hdev->pkt_type = (__u16) dr.dev_opt;
1918 break;
1919
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001921 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1922 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 break;
1924
1925 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001926 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1927 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 break;
1929
1930 default:
1931 err = -EINVAL;
1932 break;
1933 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001934
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001935done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936 hci_dev_put(hdev);
1937 return err;
1938}
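
/* Hypothetical userspace sketch (not part of this file) for two of the
 * HCISET* cases above. Enabling page and inquiry scan makes the
 * controller connectable and discoverable, which hci_update_scan_state()
 * then mirrors into the mgmt-visible flags. For HCISETACLMTU the packed
 * dev_opt layout follows from the pointer arithmetic above: on a
 * little-endian machine the packet count sits in the low 16 bits and the
 * MTU in the high 16 bits.
 *
 *	struct hci_dev_req dr = { .dev_id = dev_id };
 *
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
 *	if (ioctl(hci_sk, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 *
 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
 *	if (ioctl(hci_sk, HCISETACLMTU, (unsigned long) &dr) < 0)
 *		perror("HCISETACLMTU");
 */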
1939
1940int hci_get_dev_list(void __user *arg)
1941{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001942 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 struct hci_dev_list_req *dl;
1944 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 int n = 0, size, err;
1946 __u16 dev_num;
1947
1948 if (get_user(dev_num, (__u16 __user *) arg))
1949 return -EFAULT;
1950
1951 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1952 return -EINVAL;
1953
1954 size = sizeof(*dl) + dev_num * sizeof(*dr);
1955
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001956 dl = kzalloc(size, GFP_KERNEL);
1957 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 return -ENOMEM;
1959
1960 dr = dl->dev_req;
1961
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001962 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001963 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001964 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001965
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001966 /* When the auto-off is configured it means the transport
1967 * is running, but in that case still indicate that the
1968 * device is actually down.
1969 */
1970 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1971 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02001972
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001974 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001975
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 if (++n >= dev_num)
1977 break;
1978 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001979 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
1981 dl->dev_num = n;
1982 size = sizeof(*dl) + n * sizeof(*dr);
1983
1984 err = copy_to_user(arg, dl, size);
1985 kfree(dl);
1986
1987 return err ? -EFAULT : 0;
1988}
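
/* Hypothetical userspace sketch (not part of this file) for
 * HCIGETDEVLIST: dev_num is filled in with the capacity on input and
 * holds the number of returned entries on output. Devices pending
 * auto-off are reported as down, matching the flag filtering above.
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	if (!dl)
 *		return -1;
 *
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(hci_sk, HCIGETDEVLIST, (void *) dl) < 0) {
 *		free(dl);
 *		return -1;
 *	}
 *
 *	for (i = 0; i < dl->dev_num; i++)
 *		printf("hci%u %s\n", dr[i].dev_id,
 *		       (dr[i].dev_opt & (1 << HCI_UP)) ? "up" : "down");
 *	free(dl);
 */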
1989
1990int hci_get_dev_info(void __user *arg)
1991{
1992 struct hci_dev *hdev;
1993 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001994 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 int err = 0;
1996
1997 if (copy_from_user(&di, arg, sizeof(di)))
1998 return -EFAULT;
1999
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002000 hdev = hci_dev_get(di.dev_id);
2001 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 return -ENODEV;
2003
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002004 /* When the auto-off is configured it means the transport
2005 * is running, but in that case still indicate that the
2006 * device is actually down.
2007 */
2008 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2009 flags = hdev->flags & ~BIT(HCI_UP);
2010 else
2011 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002012
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 strcpy(di.name, hdev->name);
2014 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002015 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002016 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002018 if (lmp_bredr_capable(hdev)) {
2019 di.acl_mtu = hdev->acl_mtu;
2020 di.acl_pkts = hdev->acl_pkts;
2021 di.sco_mtu = hdev->sco_mtu;
2022 di.sco_pkts = hdev->sco_pkts;
2023 } else {
2024 di.acl_mtu = hdev->le_mtu;
2025 di.acl_pkts = hdev->le_pkts;
2026 di.sco_mtu = 0;
2027 di.sco_pkts = 0;
2028 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 di.link_policy = hdev->link_policy;
2030 di.link_mode = hdev->link_mode;
2031
2032 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2033 memcpy(&di.features, &hdev->features, sizeof(di.features));
2034
2035 if (copy_to_user(arg, &di, sizeof(di)))
2036 err = -EFAULT;
2037
2038 hci_dev_put(hdev);
2039
2040 return err;
2041}
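
/* Hypothetical userspace sketch (not part of this file) for
 * HCIGETDEVINFO; only dev_id needs to be set on input.
 *
 *	struct hci_dev_info di = { .dev_id = dev_id };
 *	char addr[18];
 *
 *	if (ioctl(hci_sk, HCIGETDEVINFO, (void *) &di) < 0)
 *		return -1;
 *
 *	ba2str(&di.bdaddr, addr);
 *	printf("%s %s acl_mtu %d acl_pkts %d\n", di.name, addr,
 *	       di.acl_mtu, di.acl_pkts);
 */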
2042
2043/* ---- Interface to HCI drivers ---- */
2044
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002045static int hci_rfkill_set_block(void *data, bool blocked)
2046{
2047 struct hci_dev *hdev = data;
2048
2049 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2050
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002051 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2052 return -EBUSY;
2053
Johan Hedberg5e130362013-09-13 08:58:17 +03002054 if (blocked) {
2055 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002056 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2057 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002058 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002059 } else {
2060 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002061 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002062
2063 return 0;
2064}
2065
2066static const struct rfkill_ops hci_rfkill_ops = {
2067 .set_block = hci_rfkill_set_block,
2068};
2069
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002070static void hci_power_on(struct work_struct *work)
2071{
2072 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002073 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002074
2075 BT_DBG("%s", hdev->name);
2076
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002077 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002078 if (err < 0) {
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302079 hci_dev_lock(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002080 mgmt_set_powered_failed(hdev, err);
Jaganath Kanakkassery3ad67582014-12-11 11:43:12 +05302081 hci_dev_unlock(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002082 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002083 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002084
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002085 /* During the HCI setup phase, a few error conditions are
2086 * ignored and they need to be checked now. If they are still
2087 * valid, it is important to turn the device back off.
2088 */
2089 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002090 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002091 (hdev->dev_type == HCI_BREDR &&
2092 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2093 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002094 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2095 hci_dev_do_close(hdev);
2096 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002097 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2098 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002099 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002100
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002101 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002102 /* For unconfigured devices, set the HCI_RAW flag
2103 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002104 */
2105 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2106 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002107
2108 /* For fully configured devices, this will send
2109 * the Index Added event. For unconfigured devices,
2110	 * it will send the Unconfigured Index Added event.
2111 *
2112 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2113	 * and no event will be sent.
2114 */
Johan Hedberg744cf192011-11-08 20:40:14 +02002115 mgmt_index_added(hdev);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002116 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002117 /* When the controller is now configured, then it
2118 * is important to clear the HCI_RAW flag.
2119 */
2120 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2121 clear_bit(HCI_RAW, &hdev->flags);
2122
Marcel Holtmannd603b762014-07-06 12:11:14 +02002123 /* Powering on the controller with HCI_CONFIG set only
2124 * happens with the transition from unconfigured to
2125 * configured. This will send the Index Added event.
2126 */
2127 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002128 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002129}
2130
2131static void hci_power_off(struct work_struct *work)
2132{
Johan Hedberg32435532011-11-07 22:16:04 +02002133 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002134 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002135
2136 BT_DBG("%s", hdev->name);
2137
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002138 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002139}
2140
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002141static void hci_discov_off(struct work_struct *work)
2142{
2143 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002144
2145 hdev = container_of(work, struct hci_dev, discov_off.work);
2146
2147 BT_DBG("%s", hdev->name);
2148
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002149 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002150}
2151
Johan Hedberg35f74982014-02-18 17:14:32 +02002152void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002153{
Johan Hedberg48210022013-01-27 00:31:28 +02002154 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002155
Johan Hedberg48210022013-01-27 00:31:28 +02002156 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2157 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002158 kfree(uuid);
2159 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002160}
2161
Johan Hedberg35f74982014-02-18 17:14:32 +02002162void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002163{
Johan Hedberg0378b592014-11-19 15:22:22 +02002164 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002165
Johan Hedberg0378b592014-11-19 15:22:22 +02002166 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2167 list_del_rcu(&key->list);
2168 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002169 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002170}
2171
Johan Hedberg35f74982014-02-18 17:14:32 +02002172void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002173{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002174 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002175
Johan Hedberg970d0f12014-11-13 14:37:47 +02002176 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2177 list_del_rcu(&k->list);
2178 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002179 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002180}
2181
Johan Hedberg970c4e42014-02-18 10:19:33 +02002182void hci_smp_irks_clear(struct hci_dev *hdev)
2183{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002184 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002185
Johan Hedbergadae20c2014-11-13 14:37:48 +02002186 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2187 list_del_rcu(&k->list);
2188 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002189 }
2190}
2191
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002192struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2193{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002194 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002195
Johan Hedberg0378b592014-11-19 15:22:22 +02002196 rcu_read_lock();
2197 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2198 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2199 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002200 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002201 }
2202 }
2203 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002204
2205 return NULL;
2206}
2207
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302208static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002209 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002210{
2211 /* Legacy key */
2212 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302213 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002214
2215 /* Debug keys are insecure so don't store them persistently */
2216 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302217 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002218
2219 /* Changed combination key and there's no previous one */
2220 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302221 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002222
2223 /* Security mode 3 case */
2224 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302225 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002226
Johan Hedberge3befab2014-06-01 16:33:39 +03002227 /* BR/EDR key derived using SC from an LE link */
2228 if (conn->type == LE_LINK)
2229 return true;
2230
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002231	/* Neither local nor remote side had no-bonding as a requirement */
2232 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302233 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002234
2235 /* Local side had dedicated bonding as requirement */
2236 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302237 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002238
2239 /* Remote side had dedicated bonding as requirement */
2240 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302241 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002242
2243 /* If none of the above criteria match, then don't store the key
2244 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302245 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002246}
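
/* Worked example of the decision above (illustrative only): a legacy
 * HCI_LK_COMBINATION key (0x00) is always stored; a
 * HCI_LK_DEBUG_COMBINATION key never is. An SSP-derived
 * HCI_LK_UNAUTH_COMBINATION_P192 key where both auth_type and
 * remote_auth are 0x01 (no bonding requested on either side) falls
 * through every check and is kept only for the current connection.
 */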
2247
Johan Hedberge804d252014-07-16 11:42:28 +03002248static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002249{
Johan Hedberge804d252014-07-16 11:42:28 +03002250 if (type == SMP_LTK)
2251 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002252
Johan Hedberge804d252014-07-16 11:42:28 +03002253 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002254}
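
/* Illustrative note: SMP_LTK keys encrypt when the local side initiates
 * as master, hence HCI_ROLE_MASTER; SMP_LTK_SLAVE keys only apply when
 * the remote side starts encryption. Secure Connections keys work in
 * either role, which is why hci_find_ltk() below also accepts any key
 * for which smp_ltk_is_sc() is true.
 */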
2255
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002256struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2257 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002258{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002259 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002260
Johan Hedberg970d0f12014-11-13 14:37:47 +02002261 rcu_read_lock();
2262 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002263 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2264 continue;
2265
Johan Hedberg923e2412014-12-03 12:43:39 +02002266 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002267 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002268 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002269 }
2270 }
2271 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002272
2273 return NULL;
2274}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002275
Johan Hedberg970c4e42014-02-18 10:19:33 +02002276struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2277{
2278 struct smp_irk *irk;
2279
Johan Hedbergadae20c2014-11-13 14:37:48 +02002280 rcu_read_lock();
2281 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2282 if (!bacmp(&irk->rpa, rpa)) {
2283 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002284 return irk;
2285 }
2286 }
2287
Johan Hedbergadae20c2014-11-13 14:37:48 +02002288 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2289 if (smp_irk_matches(hdev, irk->val, rpa)) {
2290 bacpy(&irk->rpa, rpa);
2291 rcu_read_unlock();
2292 return irk;
2293 }
2294 }
2295 rcu_read_unlock();
2296
Johan Hedberg970c4e42014-02-18 10:19:33 +02002297 return NULL;
2298}
2299
2300struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2301 u8 addr_type)
2302{
2303 struct smp_irk *irk;
2304
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002305 /* Identity Address must be public or static random */
2306 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2307 return NULL;
2308
Johan Hedbergadae20c2014-11-13 14:37:48 +02002309 rcu_read_lock();
2310 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002311 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002312 bacmp(bdaddr, &irk->bdaddr) == 0) {
2313 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002314 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002315 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002316 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002317 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002318
2319 return NULL;
2320}
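
/* Illustrative note on the identity-address check above: a static random
 * address has its two most significant bits set to 11b, and since
 * bdaddr_t is stored little-endian, b[5] is the most significant byte.
 * So e.g. C0:12:34:56:78:9A passes ((0xc0 & 0xc0) == 0xc0), while a
 * resolvable private address such as 40:... is rejected.
 */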
2321
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002322struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002323 bdaddr_t *bdaddr, u8 *val, u8 type,
2324 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002325{
2326 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302327 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002328
2329 old_key = hci_find_link_key(hdev, bdaddr);
2330 if (old_key) {
2331 old_key_type = old_key->type;
2332 key = old_key;
2333 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002334 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002335 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002336 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002337 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02002338 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002339 }
2340
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002341 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002342
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002343 /* Some buggy controller combinations generate a changed
2344 * combination key for legacy pairing even when there's no
2345 * previous key */
2346 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002347 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002348 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002349 if (conn)
2350 conn->key_type = type;
2351 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002352
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002353 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002354 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002355 key->pin_len = pin_len;
2356
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002357 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002358 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002359 else
2360 key->type = type;
2361
Johan Hedberg7652ff62014-06-24 13:15:49 +03002362 if (persistent)
2363 *persistent = hci_persistent_key(hdev, conn, type,
2364 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07002365
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002366 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002367}
2368
Johan Hedbergca9142b2014-02-19 14:57:44 +02002369struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002370 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002371 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002372{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002373 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002374 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002375
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002376 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002377 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002378 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002379 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002380 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002381 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002382 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002383 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002384 }
2385
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002386 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002387 key->bdaddr_type = addr_type;
2388 memcpy(key->val, tk, sizeof(key->val));
2389 key->authenticated = authenticated;
2390 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002391 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002392 key->enc_size = enc_size;
2393 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002394
Johan Hedbergca9142b2014-02-19 14:57:44 +02002395 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002396}
2397
Johan Hedbergca9142b2014-02-19 14:57:44 +02002398struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2399 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002400{
2401 struct smp_irk *irk;
2402
2403 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2404 if (!irk) {
2405 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2406 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002407 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002408
2409 bacpy(&irk->bdaddr, bdaddr);
2410 irk->addr_type = addr_type;
2411
Johan Hedbergadae20c2014-11-13 14:37:48 +02002412 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002413 }
2414
2415 memcpy(irk->val, val, 16);
2416 bacpy(&irk->rpa, rpa);
2417
Johan Hedbergca9142b2014-02-19 14:57:44 +02002418 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002419}
2420
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002421int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2422{
2423 struct link_key *key;
2424
2425 key = hci_find_link_key(hdev, bdaddr);
2426 if (!key)
2427 return -ENOENT;
2428
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002429 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002430
Johan Hedberg0378b592014-11-19 15:22:22 +02002431 list_del_rcu(&key->list);
2432 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002433
2434 return 0;
2435}
2436
Johan Hedberge0b2b272014-02-18 17:14:31 +02002437int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002438{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002439 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002440 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002441
Johan Hedberg970d0f12014-11-13 14:37:47 +02002442 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002443 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002444 continue;
2445
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002446 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002447
Johan Hedberg970d0f12014-11-13 14:37:47 +02002448 list_del_rcu(&k->list);
2449 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002450 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002451 }
2452
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002453 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002454}
2455
Johan Hedberga7ec7332014-02-18 17:14:35 +02002456void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2457{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002458 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002459
Johan Hedbergadae20c2014-11-13 14:37:48 +02002460 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002461 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2462 continue;
2463
2464 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2465
Johan Hedbergadae20c2014-11-13 14:37:48 +02002466 list_del_rcu(&k->list);
2467 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002468 }
2469}
2470
Ville Tervo6bd32322011-02-16 16:32:41 +02002471/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
        } else {
                BT_ERR("%s command tx timeout", hdev->name);
        }

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list) {
                if (bacmp(bdaddr, &data->bdaddr) != 0)
                        continue;
                if (data->bdaddr_type != bdaddr_type)
                        continue;
                return data;
        }

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 bdaddr_type)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

        list_del(&data->list);
        kfree(data);

        return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
                            u8 *hash256, u8 *rand256)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                data->bdaddr_type = bdaddr_type;
                list_add(&data->list, &hdev->remote_oob_data);
        }

        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
        } else {
                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
        }

        if (hash256 && rand256) {
                memcpy(data->hash256, hash256, sizeof(data->hash256));
                memcpy(data->rand256, rand256, sizeof(data->rand256));
        } else {
                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
        }

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}
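/* Example (illustrative sketch): a caller that only has the P-192
 * hash/randomizer pair can pass NULL for the P-256 values, which
 * clears any stale hash256/rand256 copies. The bdaddr, hash192 and
 * rand192 variables here are hypothetical:
 *
 *      err = hci_add_remote_oob_data(hdev, &bdaddr, BDADDR_BREDR,
 *                                    hash192, rand192, NULL, NULL);
 */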
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
                                           bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, bdaddr_list, list) {
                if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
                        return b;
        }

        return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, bdaddr_list) {
                struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (!bacmp(bdaddr, BDADDR_ANY))
                return -EBADF;

        if (hci_bdaddr_list_lookup(list, bdaddr, type))
                return -EEXIST;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);
        entry->bdaddr_type = type;

        list_add(&entry->list, list);

        return 0;
}
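/* Example (illustrative sketch): these helpers operate on any of the
 * bdaddr lists kept in struct hci_dev, e.g. the whitelist. The addr
 * variable is hypothetical:
 *
 *      err = hci_bdaddr_list_add(&hdev->whitelist, &addr, BDADDR_BREDR);
 *      if (err == -EEXIST)
 *              ...address was already on the list...
 *
 * BDADDR_ANY is rejected with -EBADF on add; on delete it clears the
 * whole list instead.
 */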
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (!bacmp(bdaddr, BDADDR_ANY)) {
                hci_bdaddr_list_clear(list);
                return 0;
        }

        entry = hci_bdaddr_list_lookup(list, bdaddr, type);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
                                               bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        /* The conn params list only contains identity addresses */
        if (!hci_is_identity_address(addr, addr_type))
                return NULL;

        list_for_each_entry(params, &hdev->le_conn_params, list) {
                if (bacmp(&params->addr, addr) == 0 &&
                    params->addr_type == addr_type) {
                        return params;
                }
        }

        return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
                                                  bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *param;

        /* The list only contains identity addresses */
        if (!hci_is_identity_address(addr, addr_type))
                return NULL;

        list_for_each_entry(param, list, action) {
                if (bacmp(&param->addr, addr) == 0 &&
                    param->addr_type == addr_type)
                        return param;
        }

        return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
                                            bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        if (!hci_is_identity_address(addr, addr_type))
                return NULL;

        params = hci_conn_params_lookup(hdev, addr, addr_type);
        if (params)
                return params;

        params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params) {
                BT_ERR("Out of memory");
                return NULL;
        }

        bacpy(&params->addr, addr);
        params->addr_type = addr_type;

        list_add(&params->list, &hdev->le_conn_params);
        INIT_LIST_HEAD(&params->action);

        params->conn_min_interval = hdev->le_conn_min_interval;
        params->conn_max_interval = hdev->le_conn_max_interval;
        params->conn_latency = hdev->le_conn_latency;
        params->supervision_timeout = hdev->le_supv_timeout;
        params->auto_connect = HCI_AUTO_CONN_DISABLED;

        BT_DBG("addr %pMR (type %u)", addr, addr_type);

        return params;
}
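/* Example (illustrative sketch): callers add parameters and then pick
 * an auto-connect policy; hdev->lock must be held. The addr variable
 * is hypothetical:
 *
 *      params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *      if (params)
 *              params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *
 * The interval/latency/timeout defaults come from the controller-wide
 * le_conn_* values initialized in hci_alloc_dev() below.
 */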
static void hci_conn_params_free(struct hci_conn_params *params)
{
        if (params->conn) {
                hci_conn_drop(params->conn);
                hci_conn_put(params->conn);
        }

        list_del(&params->action);
        list_del(&params->list);
        kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        params = hci_conn_params_lookup(hdev, addr, addr_type);
        if (!params)
                return;

        hci_conn_params_free(params);

        hci_update_background_scan(hdev);

        BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
        struct hci_conn_params *params, *tmp;

        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
                if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
                        continue;
                list_del(&params->list);
                kfree(params);
        }

        BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
        struct hci_conn_params *params, *tmp;

        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
                hci_conn_params_free(params);

        hci_update_background_scan(hdev);

        BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
        if (status) {
                BT_ERR("Failed to start inquiry: status %d", status);

                hci_dev_lock(hdev);
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                hci_dev_unlock(hdev);
                return;
        }
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_request req;
        struct hci_cp_inquiry cp;
        int err;

        if (status) {
                BT_ERR("Failed to disable LE scanning: status %d", status);
                return;
        }

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
                hci_dev_lock(hdev);
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                hci_dev_unlock(hdev);
                break;

        case DISCOV_TYPE_INTERLEAVED:
                hci_req_init(&req, hdev);

                memset(&cp, 0, sizeof(cp));
                memcpy(&cp.lap, lap, sizeof(cp.lap));
                cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
                hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

                hci_dev_lock(hdev);

                hci_inquiry_cache_flush(hdev);

                err = hci_req_run(&req, inquiry_complete);
                if (err) {
                        BT_ERR("Inquiry request failed: err %d", err);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                }

                hci_dev_unlock(hdev);
                break;
        }
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_request req;
        int err;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_le_scan_disable(&req);

        err = hci_req_run(&req, le_scan_disable_work_complete);
        if (err)
                BT_ERR("Disable LE scanning request failed: err %d", err);
}
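/* Example (illustrative sketch): le_scan_disable_work above shows the
 * hci_request pattern used throughout this file - init a request,
 * queue one or more commands, then run it with a completion callback.
 * HCI_OP_SOME_CMD and some_complete_cb are placeholders:
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_SOME_CMD, sizeof(cp), &cp);
 *      err = hci_req_run(&req, some_complete_cb);
 */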
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 *bdaddr_type)
{
        if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                bacpy(bdaddr, &hdev->static_addr);
                *bdaddr_type = ADDR_LE_DEV_RANDOM;
        } else {
                bacpy(bdaddr, &hdev->bdaddr);
                *bdaddr_type = ADDR_LE_DEV_PUBLIC;
        }
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->num_iac = 0x01;           /* One IAC support is mandatory */
        hdev->io_capability = 0x03;     /* No Input No Output */
        hdev->manufacturer = 0xffff;    /* Default to internal use */
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;

        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        hdev->le_adv_channel_map = 0x07;
        hdev->le_adv_min_interval = 0x0800;
        hdev->le_adv_max_interval = 0x0800;
        hdev->le_scan_interval = 0x0060;
        hdev->le_scan_window = 0x0030;
        hdev->le_conn_min_interval = 0x0028;
        hdev->le_conn_max_interval = 0x0038;
        hdev->le_conn_latency = 0x0000;
        hdev->le_supv_timeout = 0x002a;
        hdev->le_def_tx_len = 0x001b;
        hdev->le_def_tx_time = 0x0148;
        hdev->le_max_tx_len = 0x001b;
        hdev->le_max_tx_time = 0x0148;
        hdev->le_max_rx_len = 0x001b;
        hdev->le_max_rx_time = 0x0148;

        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
        hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
        hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
        hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);

        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->blacklist);
        INIT_LIST_HEAD(&hdev->whitelist);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->identity_resolving_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
        INIT_LIST_HEAD(&hdev->le_white_list);
        INIT_LIST_HEAD(&hdev->le_conn_params);
        INIT_LIST_HEAD(&hdev->pend_le_conns);
        INIT_LIST_HEAD(&hdev->pend_le_reports);
        INIT_LIST_HEAD(&hdev->conn_hash.list);

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);

        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);

        INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

        hci_init_sysfs(hdev);
        discovery_init(hdev);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        int id, error;

        if (!hdev->open || !hdev->close || !hdev->send)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        switch (hdev->dev_type) {
        case HCI_BREDR:
                id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
                break;
        case HCI_AMP:
                id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
                break;
        default:
                return -EINVAL;
        }

        if (id < 0)
                return id;

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                          WQ_MEM_RECLAIM, 1, hdev->name);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                              WQ_MEM_RECLAIM, 1, hdev->name);
        if (!hdev->req_workqueue) {
                destroy_workqueue(hdev->workqueue);
                error = -ENOMEM;
                goto err;
        }

        if (!IS_ERR_OR_NULL(bt_debugfs))
                hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

        dev_set_name(&hdev->dev, "%s", hdev->name);

        error = device_add(&hdev->dev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
                                    hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
                set_bit(HCI_RFKILLED, &hdev->dev_flags);

        set_bit(HCI_SETUP, &hdev->dev_flags);
        set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

        if (hdev->dev_type == HCI_BREDR) {
                /* Assume BR/EDR support until proven otherwise (such as
                 * through reading supported features during init).
                 */
                set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
        }

        write_lock(&hci_dev_list_lock);
        list_add(&hdev->list, &hci_dev_list);
        write_unlock(&hci_dev_list_lock);

        /* Devices that are marked for raw-only usage are unconfigured
         * and should not be included in normal operation.
         */
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        queue_work(hdev->req_workqueue, &hdev->power_on);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);
err:
        ida_simple_remove(&hci_index_ida, hdev->id);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);
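/* Example (illustrative sketch): a transport driver pairs
 * hci_alloc_dev()/hci_register_dev() on probe with
 * hci_unregister_dev()/hci_free_dev() on remove. The my_open, my_close
 * and my_send callbacks are hypothetical:
 *
 *      hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->bus = HCI_UART;
 *      hdev->open = my_open;
 *      hdev->close = my_close;
 *      hdev->send = my_send;
 *
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 *
 * open, close and send are exactly the callbacks hci_register_dev()
 * checks for above.
 */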
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
        int i, id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        set_bit(HCI_UNREGISTER, &hdev->dev_flags);

        id = hdev->id;

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        cancel_work_sync(&hdev->power_on);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
        }

        /* mgmt_index_removed should take care of emptying the
         * pending list */
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        smp_unregister(hdev);

        device_del(&hdev->dev);

        debugfs_remove_recursive(hdev->debugfs);

        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);

        hci_dev_lock(hdev);
        hci_bdaddr_list_clear(&hdev->blacklist);
        hci_bdaddr_list_clear(&hdev->whitelist);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_smp_ltks_clear(hdev);
        hci_smp_irks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_bdaddr_list_clear(&hdev->le_white_list);
        hci_conn_params_clear_all(hdev);
        hci_discovery_filter_clear(hdev);
        hci_dev_unlock(hdev);

        hci_dev_put(hdev);

        ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
        const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
        struct sk_buff *skb;

        skb = bt_skb_alloc(3, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, 3), hw_err, 3);

        /* Send Hardware Error to upper stack */
        return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
                      !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        skb_queue_tail(&hdev->rx_q, skb);
        queue_work(hdev->workqueue, &hdev->rx_work);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
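/* Example (illustrative sketch): a driver that already has a complete
 * HCI packet tags its type and hands it up. The buf and len variables
 * are hypothetical:
 *
 *      skb = bt_skb_alloc(len, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      memcpy(skb_put(skb, len), buf, len);
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      err = hci_recv_frame(hdev, skb);
 *
 * Frames that arrive while the device is neither HCI_UP nor HCI_INIT
 * are freed and rejected with -ENXIO.
 */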
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
            index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min_t(uint, scb->expect, count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(hdev, skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data, count,
                                     STREAM_REASSEMBLY);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
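/* Example (illustrative sketch): a UART-style driver with no framing
 * of its own feeds raw bytes from its receive path; hci_reassembly()
 * recovers packet boundaries from the leading H:4 type byte and the
 * per-type header lengths. buf and len are hypothetical:
 *
 *      ret = hci_recv_stream_fragment(hdev, buf, len);
 *      if (ret < 0)
 *              BT_ERR("Frame reassembly failed (%d)", ret);
 */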
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        int err;

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        /* Time stamp */
        __net_timestamp(skb);

        /* Send copy to monitor */
        hci_send_to_monitor(hdev, skb);

        if (atomic_read(&hdev->promisc)) {
                /* Send copy to the sockets */
                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        err = hdev->send(hdev, skb);
        if (err < 0) {
                BT_ERR("%s sending frame failed (%d)", hdev->name, err);
                kfree_skb(skb);
        }
}

bool hci_req_pending(struct hci_dev *hdev)
{
        return (hdev->req_status == HCI_REQ_PEND);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
                 const void *param)
{
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        /* Stand-alone HCI commands must be flagged as
         * single-command requests.
         */
        bt_cb(skb)->req.start = true;

        skb_queue_tail(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}
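/* Example (illustrative sketch): event handlers send stand-alone
 * commands this way, e.g. accepting an incoming connection request
 * (the ev pointer is hypothetical and cp is abbreviated):
 *
 *      struct hci_cp_accept_conn_req cp;
 *
 *      bacpy(&cp.bdaddr, &ev->bdaddr);
 *      cp.role = 0x00;
 *      hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
 *
 * The command is only queued here; hci_cmd_work drains cmd_q as the
 * controller grants command credits.
 */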
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
                          struct sk_buff *skb, __u16 flags)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        skb->len = skb_headlen(skb);
        skb->data_len = 0;

        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

        switch (hdev->dev_type) {
        case HCI_BREDR:
                hci_add_acl_hdr(skb, conn->handle, flags);
                break;
        case HCI_AMP:
                hci_add_acl_hdr(skb, chan->handle, flags);
                break;
        default:
                BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
                return;
        }

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(queue, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically. We need to use spin_lock_bh
                 * here because of 6LoWPAN links, as there this function is
                 * called from softirq and using normal spin lock could cause
                 * deadlocks.
                 */
                spin_lock_bh(&queue->lock);

                __skb_queue_tail(queue, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;

                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(queue, skb);
                } while (list);

                spin_unlock_bh(&queue->lock);
        }
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = chan->conn->hdev;

        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

        hci_queue_acl(chan, &chan->data_q, skb, flags);

        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
                                     int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL, *c;
        unsigned int num = 0, min = ~0;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min = c->sent;
                        conn = c;
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();

        if (conn) {
                int cnt, q;

                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
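/* Worked example (illustrative): with hdev->acl_cnt == 8 free buffers
 * and num == 3 ACL connections holding queued data, the connection
 * with the lowest ->sent count is picked and gets a quote of
 * 8 / 3 = 2 packets. When cnt / num rounds down to zero, the quote is
 * clamped to 1 so the least-served link still makes progress.
 */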
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c;

        BT_ERR("%s link tx timeout", hdev->name);

        rcu_read_lock();

        /* Kill stalled connections */
        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == type && c->sent) {
                        BT_ERR("%s killing stalled connection %pMR",
                               hdev->name, &c->dst);
                        hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
                }
        }

        rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
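/* Worked example (illustrative, assuming a block length of 339 bytes
 * as reported by some block-based controllers): a 1024-byte skb
 * carries 1024 - HCI_ACL_HDR_SIZE = 1020 bytes past the ACL header,
 * so it occupies DIV_ROUND_UP(1020, 339) = 4 data blocks.
 */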
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003762static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763{
Marcel Holtmann4a964402014-07-02 19:10:33 +02003764 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 /* ACL tx timeout must be longer than maximum
3766 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003767 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003768 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003769 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003771}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003772
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003773static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003774{
3775 unsigned int cnt = hdev->acl_cnt;
3776 struct hci_chan *chan;
3777 struct sk_buff *skb;
3778 int quote;
3779
3780 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003781
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003782 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003783 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003784 u32 priority = (skb_peek(&chan->data_q))->priority;
3785 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003786 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003787 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003788
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003789 /* Stop if priority has changed */
3790 if (skb->priority < priority)
3791 break;
3792
3793 skb = skb_dequeue(&chan->data_q);
3794
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003795 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003796 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003797
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003798 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799 hdev->acl_last_tx = jiffies;
3800
3801 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003802 chan->sent++;
3803 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804 }
3805 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003806
3807 if (cnt != hdev->acl_cnt)
3808 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809}
3810
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003811static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003812{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003813 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003814 struct hci_chan *chan;
3815 struct sk_buff *skb;
3816 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003817 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003818
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003819 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003820
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003821 BT_DBG("%s", hdev->name);
3822
3823 if (hdev->dev_type == HCI_AMP)
3824 type = AMP_LINK;
3825 else
3826 type = ACL_LINK;
3827
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003828 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003829 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003830 u32 priority = (skb_peek(&chan->data_q))->priority;
3831 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3832 int blocks;
3833
3834 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003835 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003836
3837 /* Stop if priority has changed */
3838 if (skb->priority < priority)
3839 break;
3840
3841 skb = skb_dequeue(&chan->data_q);
3842
3843 blocks = __get_blocks(hdev, skb);
3844 if (blocks > hdev->block_cnt)
3845 return;
3846
3847 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003848 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003849
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003850 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003851 hdev->acl_last_tx = jiffies;
3852
3853 hdev->block_cnt -= blocks;
3854 quote -= blocks;
3855
3856 chan->sent += blocks;
3857 chan->conn->sent += blocks;
3858 }
3859 }
3860
3861 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003862 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003863}
3864
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
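/* SCO scheduling is much simpler than ACL: there are no per-channel
 * priority queues and no fragmentation, so each quota unit from
 * hci_low_sent() moves exactly one frame. Note that hdev->sco_cnt is
 * only tested, never decremented, in this loop; conn->sent is wrapped
 * manually once it saturates at ~0.
 */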
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

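/* The eSCO variant below differs from hci_sched_sco() only in the
 * link type passed to hci_low_sent(); both draw on the same
 * hdev->sco_cnt budget.
 */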
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

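/* LE scheduling. A controller that advertises no dedicated LE buffers
 * (hdev->le_pkts == 0) shares the ACL buffer pool, which is why the
 * leftover count is written back to either le_cnt or acl_cnt at the
 * end. The TX timeout check is skipped entirely on unconfigured
 * controllers.
 */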
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

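/* Deferred TX handler: drains the per-link-type schedulers and then
 * the raw queue. When HCI_USER_CHANNEL is set, a user space process
 * owns the device, so in-kernel scheduling is bypassed and only raw
 * (pre-formed) packets are flushed to the driver.
 */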
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send pending frames to the HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
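/* The 16-bit handle field of the ACL header packs the 12-bit
 * connection handle together with the packet boundary and broadcast
 * flags. A rough sketch of what the hci.h helpers used below do:
 *
 *	u16 raw    = __le16_to_cpu(hdr->handle);
 *	u16 flags  = raw >> 12;		// hci_flags()
 *	u16 handle = raw & 0x0fff;	// hci_handle()
 *
 * Frames for a known handle are passed on to L2CAP; anything else is
 * logged and dropped.
 */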
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
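/* The SCO receive path mirrors the ACL one, except that the handle
 * field is used as-is (no flag bits are split off) and matching frames
 * go straight to the SCO socket layer via sco_recv_scodata().
 */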
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

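/* HCI requests are framed in the command queue by tagging the first
 * command of each request with bt_cb(skb)->req.start. An empty queue,
 * or a head skb that starts a new request, means the current request
 * has been fully sent.
 */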
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

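/* Requeue a clone of the last sent command at the head of the command
 * queue. This is the workaround for controllers that emit a
 * spontaneous reset complete event during init (see
 * hci_req_cmd_complete() below); HCI_OP_RESET itself is deliberately
 * never resent.
 */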
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

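/* Completion handling for request-based commands. A rough summary of
 * the control flow below, not additional behaviour:
 *
 *	event for a command other than the last sent one?
 *		resend the last command if this was a spurious CSR
 *		reset, then bail out
 *	command succeeded with more of the request still queued?
 *		bail out, the request is still in flight
 *	otherwise take req.complete from sent_cmd, or flush the rest
 *	of the failed request and take it from its last command, and
 *	invoke it as req_complete(hdev, status)
 */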
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback is found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

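/* Deferred RX handler: every frame is copied to the monitor socket,
 * and to raw HCI sockets whenever one is in promiscuous mode, before
 * being dispatched by packet type. With HCI_USER_CHANNEL set, or for
 * data packets arriving while HCI_INIT is still in progress, the frame
 * is dropped right after those copies.
 */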
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

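/* Deferred command TX: hdev->cmd_cnt is the controller's command
 * credit. A clone of each command is kept in hdev->sent_cmd so the
 * completion event can be matched against it, and cmd_timer is armed
 * as a watchdog (HCI_CMD_TIMEOUT) except while HCI_RESET is set, when
 * it is cancelled instead. If cloning fails, the command is requeued
 * and the work item rescheduled.
 */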
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}