blob: c4802f3bd4c51086de62c048858fb7f6057f3bbb [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
43
/* Work handlers for RX, command TX and data TX; defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* States of a pending synchronous request (stored in hdev->req_status) */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize synchronous HCI requests per device */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070075/* ---- HCI debugfs entries ---- */
76
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070077static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -070083 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070084 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87}
88
/* debugfs write handler for "dut_mode".
 *
 * Parses a boolean from userspace and enables or disables Device Under
 * Test mode on the controller.  Disabling is done via HCI_OP_RESET since
 * there is no dedicated "leave DUT mode" command.  Returns @count on
 * success or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do if the requested state is already active */
	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	/* Serialize with other synchronous requests on this device */
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the response is the command status code */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Command succeeded: toggle the cached flag */
	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
134
/* File operations backing the debugfs "dut_mode" entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
141
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142/* ---- HCI requests ---- */
143
/* Completion callback for synchronous HCI requests.
 *
 * Records the result code (and a reference to the response skb, if one
 * was produced) and wakes the thread sleeping in __hci_req_sync() or
 * __hci_cmd_sync_ev().  Does nothing unless a request is pending.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		/* Take a reference for the waiter; it owns req_skb from here */
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
157
158static void hci_req_cancel(struct hci_dev *hdev, int err)
159{
160 BT_DBG("%s err 0x%2.2x", hdev->name, err);
161
162 if (hdev->req_status == HCI_REQ_PEND) {
163 hdev->req_result = err;
164 hdev->req_status = HCI_REQ_CANCELED;
165 wake_up_interruptible(&hdev->req_wait_q);
166 }
167}
168
/* Send a single HCI command and sleep until the event @event completes
 * it (an @event of 0 selects the regular Command Complete/Status flow).
 *
 * Returns the response skb on success (caller must kfree_skb() it),
 * ERR_PTR(-ENODATA) when the request completed without response data,
 * or another ERR_PTR value on failure, timeout or pending signal.
 *
 * NOTE(review): callers appear to serialize via hci_req_lock() (see
 * dut_mode_write); confirm that the req_* fields are never raced here.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue on the wait queue *before* running the request so the
	 * completion wake-up cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: no completion before the timeout */
		err = -ETIMEDOUT;
		break;
	}

	/* Consume the per-device request state for the next caller */
	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
233
/* Send a single HCI command and wait for its completion via the regular
 * Command Complete/Status flow.  Thin wrapper around __hci_cmd_sync_ev()
 * with event == 0; same return contract.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
240
/* Execute request and wait for completion.
 *
 * @func builds the request (queues zero or more HCI commands); this
 * helper then runs it and sleeps until hci_req_sync_complete() fires,
 * the @timeout expires, or a signal arrives.  Returns 0 on success or a
 * negative errno.  Callers must hold hdev->req_lock (see hci_req_sync()).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the builder queue its HCI commands */
	func(&req, opt);

	/* Queue on the wait queue *before* running the request so the
	 * completion wake-up cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: no completion before the timeout */
		err = -ETIMEDOUT;
		break;
	}

	/* Reset the per-device request state for the next caller */
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
307
Johan Hedberg01178cd2013-03-05 20:37:41 +0200308static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200309 void (*req)(struct hci_request *req,
310 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200311 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312{
313 int ret;
314
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200315 if (!test_bit(HCI_UP, &hdev->flags))
316 return -ENETDOWN;
317
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318 /* Serialize all requests */
319 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200320 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321 hci_req_unlock(hdev);
322
323 return ret;
324}
325
Johan Hedberg42c6b122013-03-05 20:37:49 +0200326static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200328 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329
330 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200331 set_bit(HCI_RESET, &req->hdev->flags);
332 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700333}
334
Johan Hedberg42c6b122013-03-05 20:37:49 +0200335static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200337 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200338
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200340 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200342 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200343 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200344
345 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200346 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347}
348
Johan Hedberg0af801b2015-02-17 15:05:21 +0200349static void amp_init1(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200350{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200351 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200352
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200353 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200354 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300355
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700356 /* Read Local Supported Commands */
357 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
358
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300359 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200360 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300361
362 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200363 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700364
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700365 /* Read Flow Control Mode */
366 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
367
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700368 /* Read Location Data */
369 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200370}
371
Johan Hedberg0af801b2015-02-17 15:05:21 +0200372static void amp_init2(struct hci_request *req)
373{
374 /* Read Local Supported Features. Not all AMP controllers
375 * support this so it's placed conditionally in the second
376 * stage init.
377 */
378 if (req->hdev->commands[14] & 0x20)
379 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
380}
381
Johan Hedberg42c6b122013-03-05 20:37:49 +0200382static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200383{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200384 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200385
386 BT_DBG("%s %ld", hdev->name, opt);
387
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300388 /* Reset */
389 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200390 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300391
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200392 switch (hdev->dev_type) {
393 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200394 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200395 break;
396
397 case HCI_AMP:
Johan Hedberg0af801b2015-02-17 15:05:21 +0200398 amp_init1(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200399 break;
400
401 default:
402 BT_ERR("Unknown device type %d", hdev->dev_type);
403 break;
404 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200405}
406
Johan Hedberg42c6b122013-03-05 20:37:49 +0200407static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200408{
Johan Hedberg2177bab2013-03-05 20:37:43 +0200409 __le16 param;
410 __u8 flt_type;
411
412 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200413 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200414
415 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200416 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200417
418 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200419 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200420
421 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200422 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200423
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700424 /* Read Number of Supported IAC */
425 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
426
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700427 /* Read Current IAC LAP */
428 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
429
Johan Hedberg2177bab2013-03-05 20:37:43 +0200430 /* Clear Event Filters */
431 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200432 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200433
434 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700435 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200436 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200437}
438
/* Stage-two setup commands for LE capable controllers. */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
462
/* Build and queue the Set Event Mask command, unmasking only the events
 * the controller's feature set can actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
539
/* Stage-two init request: transport setup plus feature-dependent
 * configuration based on what stage one reported.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* AMP controllers have their own, much smaller second stage */
	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any cached EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		/* Request extended features page 1 */
		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
621
Johan Hedberg42c6b122013-03-05 20:37:49 +0200622static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200623{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200624 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200625 struct hci_cp_write_def_link_policy cp;
626 u16 link_policy = 0;
627
628 if (lmp_rswitch_capable(hdev))
629 link_policy |= HCI_LP_RSWITCH;
630 if (lmp_hold_capable(hdev))
631 link_policy |= HCI_LP_HOLD;
632 if (lmp_sniff_capable(hdev))
633 link_policy |= HCI_LP_SNIFF;
634 if (lmp_park_capable(hdev))
635 link_policy |= HCI_LP_PARK;
636
637 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200638 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200639}
640
/* Sync the controller's "LE Host Supported" setting with the stack's
 * HCI_LE_ENABLED flag.  The command is only queued when the desired
 * value differs from what the controller currently reports.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
661
/* Build and queue the Set Event Mask Page 2 command, unmasking the CSB
 * and authenticated-payload-timeout events the controller supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
693
/* Third stage of controller initialization (BR/EDR capable controllers
 * only): set the event mask, read stored link keys, configure link
 * policy and page scan settings, program the LE event mask and LE
 * related reads, and finally fetch feature pages beyond page 1.
 *
 * Every optional command is gated on the corresponding bit of the
 * supported-commands bitmask (hdev->commands) or on an LMP/LE feature
 * bit, so only commands the controller advertises are issued.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Read Stored Link Key is supported-commands octet 6 bit 5 */
	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	/* Write Default Link Policy Settings support */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	/* Read Page Scan Activity support */
	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		/* Baseline LE event mask (lowest four event bits) */
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
795
/* Fourth stage of controller initialization: commands that depend on
 * information gathered during the earlier stages (supported-commands
 * bitmask, LMP features, flags).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
848
/* Run the full multi-stage initialization sequence for a controller in
 * configured state.
 *
 * Stages 1 and 2 apply to every controller type; stages 3 and 4 are
 * only run for HCI_BREDR type devices (AMP controllers stop after the
 * first two stages). Debugfs entries are only created while in setup
 * or config phase.
 *
 * Returns 0 on success or the negative error from the first failing
 * request stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
910
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200911static void hci_init0_req(struct hci_request *req, unsigned long opt)
912{
913 struct hci_dev *hdev = req->hdev;
914
915 BT_DBG("%s %ld", hdev->name, opt);
916
917 /* Reset */
918 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
919 hci_reset_req(req, 0);
920
921 /* Read Local Version */
922 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
923
924 /* Read BD Address */
925 if (hdev->set_bdaddr)
926 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
927}
928
929static int __hci_unconf_init(struct hci_dev *hdev)
930{
931 int err;
932
Marcel Holtmanncc78b442014-07-06 13:43:20 +0200933 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
934 return 0;
935
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200936 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
937 if (err < 0)
938 return err;
939
940 return 0;
941}
942
Johan Hedberg42c6b122013-03-05 20:37:49 +0200943static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944{
945 __u8 scan = opt;
946
Johan Hedberg42c6b122013-03-05 20:37:49 +0200947 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948
949 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200950 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951}
952
Johan Hedberg42c6b122013-03-05 20:37:49 +0200953static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954{
955 __u8 auth = opt;
956
Johan Hedberg42c6b122013-03-05 20:37:49 +0200957 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958
959 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200960 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700961}
962
Johan Hedberg42c6b122013-03-05 20:37:49 +0200963static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964{
965 __u8 encrypt = opt;
966
Johan Hedberg42c6b122013-03-05 20:37:49 +0200967 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200969 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200970 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971}
972
Johan Hedberg42c6b122013-03-05 20:37:49 +0200973static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200974{
975 __le16 policy = cpu_to_le16(opt);
976
Johan Hedberg42c6b122013-03-05 20:37:49 +0200977 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200978
979 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200980 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200981}
982
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900983/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 * Device is held on return. */
985struct hci_dev *hci_dev_get(int index)
986{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200987 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988
989 BT_DBG("%d", index);
990
991 if (index < 0)
992 return NULL;
993
994 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200995 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996 if (d->id == index) {
997 hdev = hci_dev_hold(d);
998 break;
999 }
1000 }
1001 read_unlock(&hci_dev_list_lock);
1002 return hdev;
1003}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004
1005/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001006
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001007bool hci_discovery_active(struct hci_dev *hdev)
1008{
1009 struct discovery_state *discov = &hdev->discovery;
1010
Andre Guedes6fbe1952012-02-03 17:47:58 -03001011 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001012 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001013 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001014 return true;
1015
Andre Guedes6fbe1952012-02-03 17:47:58 -03001016 default:
1017 return false;
1018 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001019}
1020
/* Transition the discovery state machine to @state and emit the
 * corresponding mgmt Discovering events on the edges that matter:
 * entering FINDING reports "discovering started", entering STOPPED
 * reports "discovering stopped" and re-evaluates background scanning.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op transitions generate no events */
	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* Coming straight from STARTING means discovery never
		 * really began, so no "stopped" event is sent.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1050
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001051void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052{
Johan Hedberg30883512012-01-04 14:16:21 +02001053 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001054 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055
Johan Hedberg561aafb2012-01-04 13:31:59 +02001056 list_for_each_entry_safe(p, n, &cache->all, all) {
1057 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001058 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001060
1061 INIT_LIST_HEAD(&cache->unknown);
1062 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063}
1064
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001065struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1066 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067{
Johan Hedberg30883512012-01-04 14:16:21 +02001068 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 struct inquiry_entry *e;
1070
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001071 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072
Johan Hedberg561aafb2012-01-04 13:31:59 +02001073 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001075 return e;
1076 }
1077
1078 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079}
1080
Johan Hedberg561aafb2012-01-04 13:31:59 +02001081struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001082 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001083{
Johan Hedberg30883512012-01-04 14:16:21 +02001084 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001085 struct inquiry_entry *e;
1086
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001087 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001088
1089 list_for_each_entry(e, &cache->unknown, list) {
1090 if (!bacmp(&e->data.bdaddr, bdaddr))
1091 return e;
1092 }
1093
1094 return NULL;
1095}
1096
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001097struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001098 bdaddr_t *bdaddr,
1099 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001100{
1101 struct discovery_state *cache = &hdev->discovery;
1102 struct inquiry_entry *e;
1103
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001104 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001105
1106 list_for_each_entry(e, &cache->resolve, list) {
1107 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1108 return e;
1109 if (!bacmp(&e->data.bdaddr, bdaddr))
1110 return e;
1111 }
1112
1113 return NULL;
1114}
1115
/* Re-insert @ie at its proper position in the name-resolve list.
 *
 * The entry is removed and then inserted before the first entry that
 * is not NAME_PENDING and whose absolute RSSI is at least as large,
 * i.e. entries with a pending name request stay in front and the rest
 * are kept ordered by signal strength (smaller |rssi| first —
 * presumably so stronger devices get their names resolved earlier;
 * NOTE(review): ordering intent inferred from the comparison).
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* Insert after the last entry that must stay ahead of @ie */
	list_add(&ie->list, pos);
}
1134
/* Add a fresh inquiry result @data to the cache, or refresh the
 * existing entry for the same address.
 *
 * Returns a set of MGMT_DEV_FOUND_* flags for the caller to report:
 * LEGACY_PAIRING when neither the new data nor the cached entry shows
 * SSP support, and CONFIRM_NAME when the remote name is still unknown
 * (also set when allocating a new entry fails).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Remove any stored remote OOB data for this address */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* RSSI changed while a name request is still needed:
		 * refresh the value and re-sort the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: promote the entry and take it off the
	 * unknown/resolve list, unless a name request is already pending.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	/* Refresh the cached data and both timestamps */
	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1196
1197static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1198{
Johan Hedberg30883512012-01-04 14:16:21 +02001199 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 struct inquiry_info *info = (struct inquiry_info *) buf;
1201 struct inquiry_entry *e;
1202 int copied = 0;
1203
Johan Hedberg561aafb2012-01-04 13:31:59 +02001204 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001206
1207 if (copied >= num)
1208 break;
1209
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 bacpy(&info->bdaddr, &data->bdaddr);
1211 info->pscan_rep_mode = data->pscan_rep_mode;
1212 info->pscan_period_mode = data->pscan_period_mode;
1213 info->pscan_mode = data->pscan_mode;
1214 memcpy(info->dev_class, data->dev_class, 3);
1215 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001216
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001218 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 }
1220
1221 BT_DBG("cache %p, copied %d", cache, copied);
1222 return copied;
1223}
1224
Johan Hedberg42c6b122013-03-05 20:37:49 +02001225static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226{
1227 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001228 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 struct hci_cp_inquiry cp;
1230
1231 BT_DBG("%s", hdev->name);
1232
1233 if (test_bit(HCI_INQUIRY, &hdev->flags))
1234 return;
1235
1236 /* Start Inquiry */
1237 memcpy(&cp.lap, &ir->lap, 3);
1238 cp.length = ir->length;
1239 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001240 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241}
1242
1243int hci_inquiry(void __user *arg)
1244{
1245 __u8 __user *ptr = arg;
1246 struct hci_inquiry_req ir;
1247 struct hci_dev *hdev;
1248 int err = 0, do_inquiry = 0, max_rsp;
1249 long timeo;
1250 __u8 *buf;
1251
1252 if (copy_from_user(&ir, ptr, sizeof(ir)))
1253 return -EFAULT;
1254
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001255 hdev = hci_dev_get(ir.dev_id);
1256 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 return -ENODEV;
1258
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001259 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001260 err = -EBUSY;
1261 goto done;
1262 }
1263
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001264 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001265 err = -EOPNOTSUPP;
1266 goto done;
1267 }
1268
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001269 if (hdev->dev_type != HCI_BREDR) {
1270 err = -EOPNOTSUPP;
1271 goto done;
1272 }
1273
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001274 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001275 err = -EOPNOTSUPP;
1276 goto done;
1277 }
1278
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001279 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001280 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001281 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001282 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 do_inquiry = 1;
1284 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001285 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286
Marcel Holtmann04837f62006-07-03 10:02:33 +02001287 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001288
1289 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001290 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1291 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001292 if (err < 0)
1293 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001294
1295 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1296 * cleared). If it is interrupted by a signal, return -EINTR.
1297 */
NeilBrown74316202014-07-07 15:16:04 +10001298 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001299 TASK_INTERRUPTIBLE))
1300 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001301 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001303 /* for unlimited number of responses we will use buffer with
1304 * 255 entries
1305 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1307
1308 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1309 * copy it to the user space.
1310 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001311 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001312 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 err = -ENOMEM;
1314 goto done;
1315 }
1316
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001317 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001319 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320
1321 BT_DBG("num_rsp %d", ir.num_rsp);
1322
1323 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1324 ptr += sizeof(ir);
1325 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001326 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001328 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 err = -EFAULT;
1330
1331 kfree(buf);
1332
1333done:
1334 hci_dev_put(hdev);
1335 return err;
1336}
1337
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001338static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 int ret = 0;
1341
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 BT_DBG("%s %p", hdev->name, hdev);
1343
1344 hci_req_lock(hdev);
1345
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001346 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001347 ret = -ENODEV;
1348 goto done;
1349 }
1350
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001351 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1352 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001353 /* Check for rfkill but allow the HCI setup stage to
1354 * proceed (which in itself doesn't cause any RF activity).
1355 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001356 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001357 ret = -ERFKILL;
1358 goto done;
1359 }
1360
1361 /* Check for valid public address or a configured static
1362 * random adddress, but let the HCI setup proceed to
1363 * be able to determine if there is a public address
1364 * or not.
1365 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001366 * In case of user channel usage, it is not important
1367 * if a public address or static random address is
1368 * available.
1369 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001370 * This check is only valid for BR/EDR controllers
1371 * since AMP controllers do not have an address.
1372 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001373 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001374 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001375 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1376 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1377 ret = -EADDRNOTAVAIL;
1378 goto done;
1379 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001380 }
1381
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 if (test_bit(HCI_UP, &hdev->flags)) {
1383 ret = -EALREADY;
1384 goto done;
1385 }
1386
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 if (hdev->open(hdev)) {
1388 ret = -EIO;
1389 goto done;
1390 }
1391
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001392 atomic_set(&hdev->cmd_cnt, 1);
1393 set_bit(HCI_INIT, &hdev->flags);
1394
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001395 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001396 if (hdev->setup)
1397 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001398
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001399 /* The transport driver can set these quirks before
1400 * creating the HCI device or in its setup callback.
1401 *
1402 * In case any of them is set, the controller has to
1403 * start up as unconfigured.
1404 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001405 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1406 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001407 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001408
1409 /* For an unconfigured controller it is required to
1410 * read at least the version information provided by
1411 * the Read Local Version Information command.
1412 *
1413 * If the set_bdaddr driver callback is provided, then
1414 * also the original Bluetooth public device address
1415 * will be read using the Read BD Address command.
1416 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001417 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001418 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001419 }
1420
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001421 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001422 /* If public address change is configured, ensure that
1423 * the address gets programmed. If the driver does not
1424 * support changing the public address, fail the power
1425 * on procedure.
1426 */
1427 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1428 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001429 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1430 else
1431 ret = -EADDRNOTAVAIL;
1432 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001433
1434 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001435 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1436 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001437 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 }
1439
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001440 clear_bit(HCI_INIT, &hdev->flags);
1441
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 if (!ret) {
1443 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001444 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 set_bit(HCI_UP, &hdev->flags);
1446 hci_notify(hdev, HCI_DEV_UP);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001447 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1448 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1449 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1450 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001451 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001452 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001453 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001454 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001455 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001456 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001458 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001459 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001460 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461
1462 skb_queue_purge(&hdev->cmd_q);
1463 skb_queue_purge(&hdev->rx_q);
1464
1465 if (hdev->flush)
1466 hdev->flush(hdev);
1467
1468 if (hdev->sent_cmd) {
1469 kfree_skb(hdev->sent_cmd);
1470 hdev->sent_cmd = NULL;
1471 }
1472
1473 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001474 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 }
1476
1477done:
1478 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 return ret;
1480}
1481
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001482/* ---- HCI ioctl helpers ---- */
1483
/* Power on the controller identified by the given device index.
 *
 * This is the ioctl-level entry point: it validates the request against
 * the current device flags before delegating to hci_dev_do_open().
 *
 * Returns 0 on success or a negative errno: -ENODEV if the index does
 * not resolve to a device, -EOPNOTSUPP for unconfigured controllers
 * outside of user channel operation, or the hci_dev_do_open() result.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	/* Balance the hci_dev_get() above on every exit path */
	hci_dev_put(hdev);
	return err;
}
1538
Johan Hedbergd7347f32014-07-04 12:37:23 +03001539/* This function requires the caller holds hdev->lock */
1540static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1541{
1542 struct hci_conn_params *p;
1543
Johan Hedbergf161dd42014-08-15 21:06:54 +03001544 list_for_each_entry(p, &hdev->le_conn_params, list) {
1545 if (p->conn) {
1546 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001547 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001548 p->conn = NULL;
1549 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001550 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001551 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001552
1553 BT_DBG("All LE pending actions cleared");
1554}
1555
/* Core power-down sequence for a controller.
 *
 * Tears the device down in a strict order: vendor shutdown, pending
 * work cancellation, queue draining, state flush under hdev->lock,
 * optional HCI reset, and finally the transport close callback.
 * Returns 0; if the device was already down only the request lock is
 * cycled and the command timer cancelled.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Device already down: just stop the command timer */
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* Only notify mgmt about the power-off when it was not an
	 * automatic timeout-driven shutdown.
	 */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1670
1671int hci_dev_close(__u16 dev)
1672{
1673 struct hci_dev *hdev;
1674 int err;
1675
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001676 hdev = hci_dev_get(dev);
1677 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001679
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001680 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001681 err = -EBUSY;
1682 goto done;
1683 }
1684
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001685 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001686 cancel_delayed_work(&hdev->power_off);
1687
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001689
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001690done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 hci_dev_put(hdev);
1692 return err;
1693}
1694
/* Perform an HCI reset on an already-open controller.
 *
 * Drops all queued traffic, drains the workqueue, flushes the inquiry
 * cache and connection hash under hdev->lock, and then issues the HCI
 * Reset command synchronously. Returns the __hci_req_sync() result.
 * The request lock is held for the whole sequence.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow control state: one command slot, no in-flight packets */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}
1728
Marcel Holtmann5c912492015-01-28 11:53:05 -08001729int hci_dev_reset(__u16 dev)
1730{
1731 struct hci_dev *hdev;
1732 int err;
1733
1734 hdev = hci_dev_get(dev);
1735 if (!hdev)
1736 return -ENODEV;
1737
1738 if (!test_bit(HCI_UP, &hdev->flags)) {
1739 err = -ENETDOWN;
1740 goto done;
1741 }
1742
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001743 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001744 err = -EBUSY;
1745 goto done;
1746 }
1747
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001748 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001749 err = -EOPNOTSUPP;
1750 goto done;
1751 }
1752
1753 err = hci_dev_do_reset(hdev);
1754
1755done:
1756 hci_dev_put(hdev);
1757 return err;
1758}
1759
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760int hci_dev_reset_stat(__u16 dev)
1761{
1762 struct hci_dev *hdev;
1763 int ret = 0;
1764
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001765 hdev = hci_dev_get(dev);
1766 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 return -ENODEV;
1768
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001769 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001770 ret = -EBUSY;
1771 goto done;
1772 }
1773
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001774 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001775 ret = -EOPNOTSUPP;
1776 goto done;
1777 }
1778
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1780
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001781done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 return ret;
1784}
1785
Johan Hedberg123abc02014-07-10 12:09:07 +03001786static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1787{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001788 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001789
1790 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1791
1792 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001793 conn_changed = !hci_dev_test_and_set_flag(hdev,
1794 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001795 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001796 conn_changed = hci_dev_test_and_clear_flag(hdev,
1797 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001798
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001799 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001800 discov_changed = !hci_dev_test_and_set_flag(hdev,
1801 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001802 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001803 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001804 discov_changed = hci_dev_test_and_clear_flag(hdev,
1805 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001806 }
1807
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001808 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001809 return;
1810
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001811 if (conn_changed || discov_changed) {
1812 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001813 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001814
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001815 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001816 mgmt_update_adv_data(hdev);
1817
Johan Hedberg123abc02014-07-10 12:09:07 +03001818 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001819 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001820}
1821
/* Handle the legacy HCI device-configuration ioctls (HCISETAUTH,
 * HCISETSCAN, HCISETPTYPE, ...).
 *
 * Copies a struct hci_dev_req from userspace, resolves the target
 * device, rejects user-channel, unconfigured, non-BR/EDR and
 * BR/EDR-disabled devices with -EBUSY/-EOPNOTSUPP, and then dispatches
 * on the ioctl command. Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls, dev_opt packs two 16-bit values:
	 * the low word is the packet count, the high word the MTU.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1923
/* HCIGETDEVLIST ioctl helper: copy the list of registered controllers
 * (id + flags per device) to userspace.
 *
 * The caller supplies the capacity in the leading __u16 of the buffer;
 * it is bounded to keep the kernel allocation at most two pages.
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the allocation below stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy out the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1973
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for one
 * controller and copy it to userspace.
 *
 * Returns 0 on success, -EFAULT on a failed user copy, or -ENODEV if
 * the requested device index does not exist.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Pack bus type in the low nibble and device type above it */
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer sizes in the ACL
	 * fields and zero SCO values.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2026
2027/* ---- Interface to HCI drivers ---- */
2028
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002029static int hci_rfkill_set_block(void *data, bool blocked)
2030{
2031 struct hci_dev *hdev = data;
2032
2033 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2034
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002035 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002036 return -EBUSY;
2037
Johan Hedberg5e130362013-09-13 08:58:17 +03002038 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002039 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002040 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2041 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002042 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002043 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002044 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002045 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002046
2047 return 0;
2048}
2049
/* rfkill operations for HCI controllers; only blocking/unblocking the
 * radio is supported.
 */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2053
/* Deferred power-on work handler (hdev->power_on).
 *
 * Opens the device, re-checks the error conditions that are ignored
 * during the setup phase (rfkill, unconfigured, missing address) and
 * turns the device back off if any still holds. On the first power-on
 * it also completes the SETUP/CONFIG transitions and announces the
 * index to the management interface.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		/* Schedule the automatic power-off after the grace period */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2114
2115static void hci_power_off(struct work_struct *work)
2116{
Johan Hedberg32435532011-11-07 22:16:04 +02002117 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002118 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002119
2120 BT_DBG("%s", hdev->name);
2121
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002122 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002123}
2124
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002125static void hci_error_reset(struct work_struct *work)
2126{
2127 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2128
2129 BT_DBG("%s", hdev->name);
2130
2131 if (hdev->hw_error)
2132 hdev->hw_error(hdev, hdev->hw_error_code);
2133 else
2134 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2135 hdev->hw_error_code);
2136
2137 if (hci_dev_do_close(hdev))
2138 return;
2139
Marcel Holtmannc7741d12015-01-28 11:09:55 -08002140 hci_dev_do_open(hdev);
2141}
2142
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002143static void hci_discov_off(struct work_struct *work)
2144{
2145 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002146
2147 hdev = container_of(work, struct hci_dev, discov_off.work);
2148
2149 BT_DBG("%s", hdev->name);
2150
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002151 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002152}
2153
Johan Hedberg35f74982014-02-18 17:14:32 +02002154void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002155{
Johan Hedberg48210022013-01-27 00:31:28 +02002156 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002157
Johan Hedberg48210022013-01-27 00:31:28 +02002158 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2159 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002160 kfree(uuid);
2161 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002162}
2163
Johan Hedberg35f74982014-02-18 17:14:32 +02002164void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002165{
Johan Hedberg0378b592014-11-19 15:22:22 +02002166 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002167
Johan Hedberg0378b592014-11-19 15:22:22 +02002168 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2169 list_del_rcu(&key->list);
2170 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002171 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002172}
2173
Johan Hedberg35f74982014-02-18 17:14:32 +02002174void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002175{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002176 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002177
Johan Hedberg970d0f12014-11-13 14:37:47 +02002178 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2179 list_del_rcu(&k->list);
2180 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002181 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002182}
2183
Johan Hedberg970c4e42014-02-18 10:19:33 +02002184void hci_smp_irks_clear(struct hci_dev *hdev)
2185{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002186 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002187
Johan Hedbergadae20c2014-11-13 14:37:48 +02002188 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2189 list_del_rcu(&k->list);
2190 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002191 }
2192}
2193
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002194struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2195{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002196 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002197
Johan Hedberg0378b592014-11-19 15:22:22 +02002198 rcu_read_lock();
2199 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2200 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2201 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002202 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002203 }
2204 }
2205 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002206
2207 return NULL;
2208}
2209
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently.  The checks below are ordered by precedence: the first
 * one that matches determines the result.  @old_key_type is 0xff when
 * no previous key existed for this peer.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key (types 0x00-0x02) - always stored */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case - no connection to consult */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2249
Johan Hedberge804d252014-07-16 11:42:28 +03002250static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002251{
Johan Hedberge804d252014-07-16 11:42:28 +03002252 if (type == SMP_LTK)
2253 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002254
Johan Hedberge804d252014-07-16 11:42:28 +03002255 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002256}
2257
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002258struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2259 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002260{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002261 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002262
Johan Hedberg970d0f12014-11-13 14:37:47 +02002263 rcu_read_lock();
2264 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03002265 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2266 continue;
2267
Johan Hedberg923e2412014-12-03 12:43:39 +02002268 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02002269 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002270 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002271 }
2272 }
2273 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002274
2275 return NULL;
2276}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002277
Johan Hedberg970c4e42014-02-18 10:19:33 +02002278struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2279{
2280 struct smp_irk *irk;
2281
Johan Hedbergadae20c2014-11-13 14:37:48 +02002282 rcu_read_lock();
2283 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2284 if (!bacmp(&irk->rpa, rpa)) {
2285 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002286 return irk;
2287 }
2288 }
2289
Johan Hedbergadae20c2014-11-13 14:37:48 +02002290 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2291 if (smp_irk_matches(hdev, irk->val, rpa)) {
2292 bacpy(&irk->rpa, rpa);
2293 rcu_read_unlock();
2294 return irk;
2295 }
2296 }
2297 rcu_read_unlock();
2298
Johan Hedberg970c4e42014-02-18 10:19:33 +02002299 return NULL;
2300}
2301
2302struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2303 u8 addr_type)
2304{
2305 struct smp_irk *irk;
2306
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002307 /* Identity Address must be public or static random */
2308 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2309 return NULL;
2310
Johan Hedbergadae20c2014-11-13 14:37:48 +02002311 rcu_read_lock();
2312 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02002313 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02002314 bacmp(bdaddr, &irk->bdaddr) == 0) {
2315 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002316 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02002317 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02002318 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02002319 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02002320
2321 return NULL;
2322}
2323
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * An existing entry for the address is reused; otherwise a new one is
 * allocated and linked into hdev->link_keys.  When @persistent is
 * non-NULL it is set to whether the key should be stored permanently
 * (see hci_persistent_key()).  Returns the stored entry, or NULL on
 * allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2370
Johan Hedbergca9142b2014-02-19 14:57:44 +02002371struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002372 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002373 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002374{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002375 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03002376 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002377
Johan Hedbergf3a73d92014-05-29 15:02:59 +03002378 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002379 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002380 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002381 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002382 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002383 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002384 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02002385 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002386 }
2387
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002388 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002389 key->bdaddr_type = addr_type;
2390 memcpy(key->val, tk, sizeof(key->val));
2391 key->authenticated = authenticated;
2392 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002393 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002394 key->enc_size = enc_size;
2395 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002396
Johan Hedbergca9142b2014-02-19 14:57:44 +02002397 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002398}
2399
Johan Hedbergca9142b2014-02-19 14:57:44 +02002400struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02002402{
2403 struct smp_irk *irk;
2404
2405 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2406 if (!irk) {
2407 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2408 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002409 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002410
2411 bacpy(&irk->bdaddr, bdaddr);
2412 irk->addr_type = addr_type;
2413
Johan Hedbergadae20c2014-11-13 14:37:48 +02002414 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002415 }
2416
2417 memcpy(irk->val, val, 16);
2418 bacpy(&irk->rpa, rpa);
2419
Johan Hedbergca9142b2014-02-19 14:57:44 +02002420 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002421}
2422
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002423int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2424{
2425 struct link_key *key;
2426
2427 key = hci_find_link_key(hdev, bdaddr);
2428 if (!key)
2429 return -ENOENT;
2430
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002431 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002432
Johan Hedberg0378b592014-11-19 15:22:22 +02002433 list_del_rcu(&key->list);
2434 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002435
2436 return 0;
2437}
2438
Johan Hedberge0b2b272014-02-18 17:14:31 +02002439int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002440{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002441 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002442 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002443
Johan Hedberg970d0f12014-11-13 14:37:47 +02002444 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002445 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002446 continue;
2447
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002448 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002449
Johan Hedberg970d0f12014-11-13 14:37:47 +02002450 list_del_rcu(&k->list);
2451 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002452 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002453 }
2454
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002455 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002456}
2457
Johan Hedberga7ec7332014-02-18 17:14:35 +02002458void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2459{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002460 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002461
Johan Hedbergadae20c2014-11-13 14:37:48 +02002462 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002463 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2464 continue;
2465
2466 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2467
Johan Hedbergadae20c2014-11-13 14:37:48 +02002468 list_del_rcu(&k->list);
2469 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002470 }
2471}
2472
Johan Hedberg55e76b32015-03-10 22:34:40 +02002473bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2474{
2475 struct smp_ltk *k;
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002476 struct smp_irk *irk;
Johan Hedberg55e76b32015-03-10 22:34:40 +02002477 u8 addr_type;
2478
2479 if (type == BDADDR_BREDR) {
2480 if (hci_find_link_key(hdev, bdaddr))
2481 return true;
2482 return false;
2483 }
2484
2485 /* Convert to HCI addr type which struct smp_ltk uses */
2486 if (type == BDADDR_LE_PUBLIC)
2487 addr_type = ADDR_LE_DEV_PUBLIC;
2488 else
2489 addr_type = ADDR_LE_DEV_RANDOM;
2490
Johan Hedberg4ba9faf2015-03-11 10:52:08 +02002491 irk = hci_get_irk(hdev, bdaddr, addr_type);
2492 if (irk) {
2493 bdaddr = &irk->bdaddr;
2494 addr_type = irk->addr_type;
2495 }
2496
Johan Hedberg55e76b32015-03-10 22:34:40 +02002497 rcu_read_lock();
2498 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg87c8b282015-03-11 08:55:51 +02002499 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2500 rcu_read_unlock();
Johan Hedberg55e76b32015-03-10 22:34:40 +02002501 return true;
Johan Hedberg87c8b282015-03-11 08:55:51 +02002502 }
Johan Hedberg55e76b32015-03-10 22:34:40 +02002503 }
2504 rcu_read_unlock();
2505
2506 return false;
2507}
2508
Ville Tervo6bd32322011-02-16 16:32:41 +02002509/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002510static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02002511{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002512 struct hci_dev *hdev = container_of(work, struct hci_dev,
2513 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002514
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002515 if (hdev->sent_cmd) {
2516 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2517 u16 opcode = __le16_to_cpu(sent->opcode);
2518
2519 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2520 } else {
2521 BT_ERR("%s command tx timeout", hdev->name);
2522 }
2523
Ville Tervo6bd32322011-02-16 16:32:41 +02002524 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002525 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002526}
2527
Szymon Janc2763eda2011-03-22 13:12:22 +01002528struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002529 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002530{
2531 struct oob_data *data;
2532
Johan Hedberg6928a922014-10-26 20:46:09 +01002533 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2534 if (bacmp(bdaddr, &data->bdaddr) != 0)
2535 continue;
2536 if (data->bdaddr_type != bdaddr_type)
2537 continue;
2538 return data;
2539 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002540
2541 return NULL;
2542}
2543
Johan Hedberg6928a922014-10-26 20:46:09 +01002544int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2545 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002546{
2547 struct oob_data *data;
2548
Johan Hedberg6928a922014-10-26 20:46:09 +01002549 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002550 if (!data)
2551 return -ENOENT;
2552
Johan Hedberg6928a922014-10-26 20:46:09 +01002553 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002554
2555 list_del(&data->list);
2556 kfree(data);
2557
2558 return 0;
2559}
2560
Johan Hedberg35f74982014-02-18 17:14:32 +02002561void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002562{
2563 struct oob_data *data, *n;
2564
2565 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2566 list_del(&data->list);
2567 kfree(data);
2568 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002569}
2570
/* Store (or update) remote out-of-band pairing data for a peer.
 *
 * @hash192/@rand192 and @hash256/@rand256 are the P-192 and P-256
 * hash/randomizer pairs; either pair may be NULL.  The entry's
 * "present" field is a bitmask of which pairs are stored:
 * 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both, 0x00 = neither.
 * Missing pairs are zeroed out in the stored entry.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	/* Reuse an existing entry for this peer, else allocate one */
	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		/* P-192 pair alone: mark only that pair as present */
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2616
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002617struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002618 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002619{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002620 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002621
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002622 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002623 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002624 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002625 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002626
2627 return NULL;
2628}
2629
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002630void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002631{
2632 struct list_head *p, *n;
2633
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002634 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002635 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002636
2637 list_del(p);
2638 kfree(b);
2639 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002640}
2641
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002642int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002643{
2644 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002645
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002646 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002647 return -EBADF;
2648
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002649 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002650 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002651
Johan Hedberg27f70f32014-07-21 10:50:06 +03002652 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002653 if (!entry)
2654 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002655
2656 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002657 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002658
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002659 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002660
2661 return 0;
2662}
2663
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002664int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002665{
2666 struct bdaddr_list *entry;
2667
Johan Hedberg35f74982014-02-18 17:14:32 +02002668 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002669 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002670 return 0;
2671 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002672
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002673 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002674 if (!entry)
2675 return -ENOENT;
2676
2677 list_del(&entry->list);
2678 kfree(entry);
2679
2680 return 0;
2681}
2682
Andre Guedes15819a72014-02-03 13:56:18 -03002683/* This function requires the caller holds hdev->lock */
2684struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2685 bdaddr_t *addr, u8 addr_type)
2686{
2687 struct hci_conn_params *params;
2688
Johan Hedberg738f6182014-07-03 19:33:51 +03002689 /* The conn params list only contains identity addresses */
2690 if (!hci_is_identity_address(addr, addr_type))
2691 return NULL;
2692
Andre Guedes15819a72014-02-03 13:56:18 -03002693 list_for_each_entry(params, &hdev->le_conn_params, list) {
2694 if (bacmp(&params->addr, addr) == 0 &&
2695 params->addr_type == addr_type) {
2696 return params;
2697 }
2698 }
2699
2700 return NULL;
2701}
2702
2703/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002704struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2705 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002706{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002707 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002708
Johan Hedberg738f6182014-07-03 19:33:51 +03002709 /* The list only contains identity addresses */
2710 if (!hci_is_identity_address(addr, addr_type))
2711 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002712
Johan Hedberg501f8822014-07-04 12:37:26 +03002713 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002714 if (bacmp(&param->addr, addr) == 0 &&
2715 param->addr_type == addr_type)
2716 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002717 }
2718
2719 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002720}
2721
2722/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002723struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2724 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002725{
2726 struct hci_conn_params *params;
2727
Johan Hedbergc46245b2014-07-02 17:37:33 +03002728 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002729 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03002730
Andre Guedes15819a72014-02-03 13:56:18 -03002731 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03002732 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002733 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03002734
2735 params = kzalloc(sizeof(*params), GFP_KERNEL);
2736 if (!params) {
2737 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002738 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002739 }
2740
2741 bacpy(&params->addr, addr);
2742 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03002743
2744 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03002745 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03002746
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002747 params->conn_min_interval = hdev->le_conn_min_interval;
2748 params->conn_max_interval = hdev->le_conn_max_interval;
2749 params->conn_latency = hdev->le_conn_latency;
2750 params->supervision_timeout = hdev->le_supv_timeout;
2751 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2752
2753 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2754
Marcel Holtmann51d167c2014-07-01 12:11:04 +02002755 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02002756}
2757
Johan Hedbergf6c63242014-08-15 21:06:59 +03002758static void hci_conn_params_free(struct hci_conn_params *params)
2759{
2760 if (params->conn) {
2761 hci_conn_drop(params->conn);
2762 hci_conn_put(params->conn);
2763 }
2764
2765 list_del(&params->action);
2766 list_del(&params->list);
2767 kfree(params);
2768}
2769
Andre Guedes15819a72014-02-03 13:56:18 -03002770/* This function requires the caller holds hdev->lock */
2771void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2772{
2773 struct hci_conn_params *params;
2774
2775 params = hci_conn_params_lookup(hdev, addr, addr_type);
2776 if (!params)
2777 return;
2778
Johan Hedbergf6c63242014-08-15 21:06:59 +03002779 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002780
Johan Hedberg95305ba2014-07-04 12:37:21 +03002781 hci_update_background_scan(hdev);
2782
Andre Guedes15819a72014-02-03 13:56:18 -03002783 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2784}
2785
/* Remove all connection-parameter entries whose auto_connect mode is
 * HCI_AUTO_CONN_DISABLED.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		/* Unlike hci_conn_params_free(), only the main list
		 * link is removed here. NOTE(review): this assumes a
		 * disabled entry is never queued on an action list and
		 * has no attached conn — confirm against the callers.
		 */
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2800
/* Remove and free every LE connection-parameter entry of the device.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	/* _safe iteration is required because hci_conn_params_free()
	 * unlinks and frees the current entry.
	 */
	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	/* With no parameter entries left, background scanning may need
	 * to be stopped or reconfigured.
	 */
	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}
2813
Marcel Holtmann1904a852015-01-11 13:50:44 -08002814static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002815{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002816 if (status) {
2817 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002818
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002819 hci_dev_lock(hdev);
2820 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2821 hci_dev_unlock(hdev);
2822 return;
2823 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002824}
2825
Marcel Holtmann1904a852015-01-11 13:50:44 -08002826static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2827 u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002828{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002829 /* General inquiry access code (GIAC) */
2830 u8 lap[3] = { 0x33, 0x8b, 0x9e };
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002831 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002832 int err;
2833
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002834 if (status) {
2835 BT_ERR("Failed to disable LE scanning: status %d", status);
2836 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002837 }
2838
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08002839 hdev->discovery.scan_start = 0;
2840
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002841 switch (hdev->discovery.type) {
2842 case DISCOV_TYPE_LE:
2843 hci_dev_lock(hdev);
2844 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2845 hci_dev_unlock(hdev);
2846 break;
2847
2848 case DISCOV_TYPE_INTERLEAVED:
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002849 hci_dev_lock(hdev);
2850
Jakub Pawlowski07d23342015-03-17 09:04:14 -07002851 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2852 &hdev->quirks)) {
2853 /* If we were running LE only scan, change discovery
2854 * state. If we were running both LE and BR/EDR inquiry
2855 * simultaneously, and BR/EDR inquiry is already
2856 * finished, stop discovery, otherwise BR/EDR inquiry
Wesley Kuo177d0502015-05-13 10:33:15 +08002857 * will stop discovery when finished. If we will resolve
2858 * remote device name, do not change discovery state.
Jakub Pawlowski07d23342015-03-17 09:04:14 -07002859 */
Wesley Kuo177d0502015-05-13 10:33:15 +08002860 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2861 hdev->discovery.state != DISCOVERY_RESOLVING)
Jakub Pawlowski07d23342015-03-17 09:04:14 -07002862 hci_discovery_set_state(hdev,
2863 DISCOVERY_STOPPED);
2864 } else {
Johan Hedbergbaf880a2015-03-21 08:02:23 +02002865 struct hci_request req;
2866
Jakub Pawlowski07d23342015-03-17 09:04:14 -07002867 hci_inquiry_cache_flush(hdev);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002868
Johan Hedbergbaf880a2015-03-21 08:02:23 +02002869 hci_req_init(&req, hdev);
2870
2871 memset(&cp, 0, sizeof(cp));
2872 memcpy(&cp.lap, lap, sizeof(cp.lap));
2873 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2874 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2875
Jakub Pawlowski07d23342015-03-17 09:04:14 -07002876 err = hci_req_run(&req, inquiry_complete);
2877 if (err) {
2878 BT_ERR("Inquiry request failed: err %d", err);
2879 hci_discovery_set_state(hdev,
2880 DISCOVERY_STOPPED);
2881 }
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002882 }
2883
2884 hci_dev_unlock(hdev);
2885 break;
2886 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002887}
2888
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002889static void le_scan_disable_work(struct work_struct *work)
2890{
2891 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002892 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002893 struct hci_request req;
2894 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002895
2896 BT_DBG("%s", hdev->name);
2897
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08002898 cancel_delayed_work_sync(&hdev->le_scan_restart);
2899
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002900 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002901
Andre Guedesb1efcc22014-02-26 20:21:40 -03002902 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002903
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002904 err = hci_req_run(&req, le_scan_disable_work_complete);
2905 if (err)
2906 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002907}
2908
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08002909static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2910 u16 opcode)
2911{
2912 unsigned long timeout, duration, scan_start, now;
2913
2914 BT_DBG("%s", hdev->name);
2915
2916 if (status) {
2917 BT_ERR("Failed to restart LE scan: status %d", status);
2918 return;
2919 }
2920
2921 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2922 !hdev->discovery.scan_start)
2923 return;
2924
2925 /* When the scan was started, hdev->le_scan_disable has been queued
2926 * after duration from scan_start. During scan restart this job
2927 * has been canceled, and we need to queue it again after proper
2928 * timeout, to make sure that scan does not run indefinitely.
2929 */
2930 duration = hdev->discovery.scan_duration;
2931 scan_start = hdev->discovery.scan_start;
2932 now = jiffies;
2933 if (now - scan_start <= duration) {
2934 int elapsed;
2935
2936 if (now >= scan_start)
2937 elapsed = now - scan_start;
2938 else
2939 elapsed = ULONG_MAX - scan_start + now;
2940
2941 timeout = duration - elapsed;
2942 } else {
2943 timeout = 0;
2944 }
2945 queue_delayed_work(hdev->workqueue,
2946 &hdev->le_scan_disable, timeout);
2947}
2948
/* Delayed work that restarts an active LE scan by chaining a scan
 * disable and a scan enable into one request. Presumably used to reset
 * the controller's duplicate filter — see the
 * HCI_QUIRK_STRICT_DUPLICATE_FILTER check in the completion handler.
 */
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}
2976
Johan Hedberga1f4c312014-02-27 14:05:41 +02002977/* Copy the Identity Address of the controller.
2978 *
2979 * If the controller has a public BD_ADDR, then by default use that one.
2980 * If this is a LE only controller without a public address, default to
2981 * the static random address.
2982 *
2983 * For debugging purposes it is possible to force controllers with a
2984 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002985 *
2986 * In case BR/EDR has been disabled on a dual-mode controller and
2987 * userspace has configured a static address, then that address
2988 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02002989 */
2990void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2991 u8 *bdaddr_type)
2992{
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002993 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002994 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002995 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002996 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02002997 bacpy(bdaddr, &hdev->static_addr);
2998 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2999 } else {
3000 bacpy(bdaddr, &hdev->bdaddr);
3001 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3002 }
3003}
3004
David Herrmann9be0dab2012-04-22 14:39:57 +02003005/* Alloc HCI device */
3006struct hci_dev *hci_alloc_dev(void)
3007{
3008 struct hci_dev *hdev;
3009
Johan Hedberg27f70f32014-07-21 10:50:06 +03003010 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003011 if (!hdev)
3012 return NULL;
3013
David Herrmannb1b813d2012-04-22 14:39:58 +02003014 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3015 hdev->esco_type = (ESCO_HV1);
3016 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003017 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3018 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003019 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003020 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3021 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003022
David Herrmannb1b813d2012-04-22 14:39:58 +02003023 hdev->sniff_max_interval = 800;
3024 hdev->sniff_min_interval = 80;
3025
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003026 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02003027 hdev->le_adv_min_interval = 0x0800;
3028 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003029 hdev->le_scan_interval = 0x0060;
3030 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003031 hdev->le_conn_min_interval = 0x0028;
3032 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003033 hdev->le_conn_latency = 0x0000;
3034 hdev->le_supv_timeout = 0x002a;
Marcel Holtmanna8e1bfa2014-12-20 16:28:40 +01003035 hdev->le_def_tx_len = 0x001b;
3036 hdev->le_def_tx_time = 0x0148;
3037 hdev->le_max_tx_len = 0x001b;
3038 hdev->le_max_tx_time = 0x0148;
3039 hdev->le_max_rx_len = 0x001b;
3040 hdev->le_max_rx_time = 0x0148;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003041
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003042 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003043 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003044 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3045 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003046
David Herrmannb1b813d2012-04-22 14:39:58 +02003047 mutex_init(&hdev->lock);
3048 mutex_init(&hdev->req_lock);
3049
3050 INIT_LIST_HEAD(&hdev->mgmt_pending);
3051 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03003052 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02003053 INIT_LIST_HEAD(&hdev->uuids);
3054 INIT_LIST_HEAD(&hdev->link_keys);
3055 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003056 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003057 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003058 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003059 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003060 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003061 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003062 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003063
3064 INIT_WORK(&hdev->rx_work, hci_rx_work);
3065 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3066 INIT_WORK(&hdev->tx_work, hci_tx_work);
3067 INIT_WORK(&hdev->power_on, hci_power_on);
Marcel Holtmannc7741d12015-01-28 11:09:55 -08003068 INIT_WORK(&hdev->error_reset, hci_error_reset);
David Herrmannb1b813d2012-04-22 14:39:58 +02003069
David Herrmannb1b813d2012-04-22 14:39:58 +02003070 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3071 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3072 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08003073 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
David Herrmannb1b813d2012-04-22 14:39:58 +02003074
David Herrmannb1b813d2012-04-22 14:39:58 +02003075 skb_queue_head_init(&hdev->rx_q);
3076 skb_queue_head_init(&hdev->cmd_q);
3077 skb_queue_head_init(&hdev->raw_q);
3078
3079 init_waitqueue_head(&hdev->req_wait_q);
3080
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003081 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003082
David Herrmannb1b813d2012-04-22 14:39:58 +02003083 hci_init_sysfs(hdev);
3084 discovery_init(hdev);
Arman Uguray203fea02015-03-23 15:57:11 -07003085 adv_info_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003086
3087 return hdev;
3088}
3089EXPORT_SYMBOL(hci_alloc_dev);
3090
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release: only the last sysfs reference
	 * is dropped here, the device core releases the memory.
	 */
	put_device(&hdev->dev);
}
3097EXPORT_SYMBOL(hci_free_dev);
3098
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099/* Register HCI device */
3100int hci_register_dev(struct hci_dev *hdev)
3101{
David Herrmannb1b813d2012-04-22 14:39:58 +02003102 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103
Marcel Holtmann74292d52014-07-06 15:50:27 +02003104 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 return -EINVAL;
3106
Mat Martineau08add512011-11-02 16:18:36 -07003107 /* Do not allow HCI_AMP devices to register at index 0,
3108 * so the index can be used as the AMP controller ID.
3109 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003110 switch (hdev->dev_type) {
3111 case HCI_BREDR:
3112 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3113 break;
3114 case HCI_AMP:
3115 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3116 break;
3117 default:
3118 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003120
Sasha Levin3df92b32012-05-27 22:36:56 +02003121 if (id < 0)
3122 return id;
3123
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124 sprintf(hdev->name, "hci%d", id);
3125 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003126
3127 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3128
Kees Cookd8537542013-07-03 15:04:57 -07003129 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3130 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003131 if (!hdev->workqueue) {
3132 error = -ENOMEM;
3133 goto err;
3134 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003135
Kees Cookd8537542013-07-03 15:04:57 -07003136 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3137 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003138 if (!hdev->req_workqueue) {
3139 destroy_workqueue(hdev->workqueue);
3140 error = -ENOMEM;
3141 goto err;
3142 }
3143
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003144 if (!IS_ERR_OR_NULL(bt_debugfs))
3145 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3146
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003147 dev_set_name(&hdev->dev, "%s", hdev->name);
3148
3149 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003150 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003151 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003153 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003154 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3155 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003156 if (hdev->rfkill) {
3157 if (rfkill_register(hdev->rfkill) < 0) {
3158 rfkill_destroy(hdev->rfkill);
3159 hdev->rfkill = NULL;
3160 }
3161 }
3162
Johan Hedberg5e130362013-09-13 08:58:17 +03003163 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003164 hci_dev_set_flag(hdev, HCI_RFKILLED);
Johan Hedberg5e130362013-09-13 08:58:17 +03003165
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003166 hci_dev_set_flag(hdev, HCI_SETUP);
3167 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003168
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003169 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003170 /* Assume BR/EDR support until proven otherwise (such as
3171 * through reading supported features during init.
3172 */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003173 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg56f87902013-10-02 13:43:13 +03003174 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003175
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003176 write_lock(&hci_dev_list_lock);
3177 list_add(&hdev->list, &hci_dev_list);
3178 write_unlock(&hci_dev_list_lock);
3179
Marcel Holtmann4a964402014-07-02 19:10:33 +02003180 /* Devices that are marked for raw-only usage are unconfigured
3181 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003182 */
3183 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003184 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003185
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003187 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188
Johan Hedberg19202572013-01-14 22:33:51 +02003189 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003190
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003192
David Herrmann33ca9542011-10-08 14:58:49 +02003193err_wqueue:
3194 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003195 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003196err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003197 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003198
David Herrmann33ca9542011-10-08 14:58:49 +02003199 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200}
3201EXPORT_SYMBOL(hci_register_dev);
3202
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Remember the index: hci_dev_put() below may free hdev, but
	 * the ida slot must still be released afterwards.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to the management interface for devices
	 * that are past the init/setup/config stages.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drop all remaining per-device state under hdev->lock. */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
3265EXPORT_SYMBOL(hci_unregister_dev);
3266
/* Suspend HCI device.
 *
 * Propagates the suspend event through hci_notify(); always returns
 * success.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
3273EXPORT_SYMBOL(hci_suspend_dev);
3274
/* Resume HCI device.
 *
 * Propagates the resume event through hci_notify(); always returns
 * success.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
3281EXPORT_SYMBOL(hci_resume_dev);
3282
Marcel Holtmann75e05692014-11-02 08:15:38 +01003283/* Reset HCI device */
3284int hci_reset_dev(struct hci_dev *hdev)
3285{
3286 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3287 struct sk_buff *skb;
3288
3289 skb = bt_skb_alloc(3, GFP_ATOMIC);
3290 if (!skb)
3291 return -ENOMEM;
3292
3293 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3294 memcpy(skb_put(skb, 3), hw_err, 3);
3295
3296 /* Send Hardware Error to upper stack */
3297 return hci_recv_frame(hdev, skb);
3298}
3299EXPORT_SYMBOL(hci_reset_dev);
3300
Marcel Holtmann76bca882009-11-18 00:40:39 +01003301/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003302int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003303{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003304 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003305 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003306 kfree_skb(skb);
3307 return -ENXIO;
3308 }
3309
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003310 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003311 bt_cb(skb)->incoming = 1;
3312
3313 /* Time stamp */
3314 __net_timestamp(skb);
3315
Marcel Holtmann76bca882009-11-18 00:40:39 +01003316 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003317 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003318
Marcel Holtmann76bca882009-11-18 00:40:39 +01003319 return 0;
3320}
3321EXPORT_SYMBOL(hci_recv_frame);
3322
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323/* ---- Interface to upper protocols ---- */
3324
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325int hci_register_cb(struct hci_cb *cb)
3326{
3327 BT_DBG("%p name %s", cb, cb->name);
3328
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003329 mutex_lock(&hci_cb_list_lock);
Johan Hedberg00629e02015-02-18 14:53:54 +02003330 list_add_tail(&cb->list, &hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003331 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003332
3333 return 0;
3334}
3335EXPORT_SYMBOL(hci_register_cb);
3336
/* Remove a previously registered protocol callback structure.
 *
 * Counterpart of hci_register_cb(); serialized by hci_cb_list_lock.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
3347EXPORT_SYMBOL(hci_unregister_cb);
3348
Marcel Holtmann51086992013-10-10 14:54:19 -07003349static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003351 int err;
3352
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003353 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003355 /* Time stamp */
3356 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003358 /* Send copy to monitor */
3359 hci_send_to_monitor(hdev, skb);
3360
3361 if (atomic_read(&hdev->promisc)) {
3362 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003363 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 }
3365
3366 /* Get rid of skb owner, prior to sending to the driver. */
3367 skb_orphan(skb);
3368
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003369 err = hdev->send(hdev, skb);
3370 if (err < 0) {
3371 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3372 kfree_skb(skb);
3373 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374}
3375
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003376/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003377int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3378 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003379{
3380 struct sk_buff *skb;
3381
3382 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3383
3384 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3385 if (!skb) {
3386 BT_ERR("%s no memory for command", hdev->name);
3387 return -ENOMEM;
3388 }
3389
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003390 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003391 * single-command requests.
3392 */
Johan Hedbergdb6e3e82015-03-30 23:21:02 +03003393 bt_cb(skb)->req.start = true;
Johan Hedberg11714b32013-03-05 20:37:47 +02003394
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003396 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397
3398 return 0;
3399}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter bytes of the last sent command
 * (just past the command header, pointing into hdev->sent_cmd), or
 * NULL when no command was sent or its opcode does not match.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Stored opcode is little-endian; compare in wire order. */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3418
/* Send ACL data */

/* Prepend an ACL data header to the skb. The connection handle and
 * the packet-boundary/broadcast flags are packed into one 16-bit
 * little-endian field; dlen is the payload length before the push.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3431
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003432static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003433 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003435 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 struct hci_dev *hdev = conn->hdev;
3437 struct sk_buff *list;
3438
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003439 skb->len = skb_headlen(skb);
3440 skb->data_len = 0;
3441
3442 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03003443
3444 switch (hdev->dev_type) {
3445 case HCI_BREDR:
3446 hci_add_acl_hdr(skb, conn->handle, flags);
3447 break;
3448 case HCI_AMP:
3449 hci_add_acl_hdr(skb, chan->handle, flags);
3450 break;
3451 default:
3452 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3453 return;
3454 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03003455
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003456 list = skb_shinfo(skb)->frag_list;
3457 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458 /* Non fragmented */
3459 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3460
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003461 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462 } else {
3463 /* Fragmented */
3464 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3465
3466 skb_shinfo(skb)->frag_list = NULL;
3467
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003468 /* Queue all fragments atomically. We need to use spin_lock_bh
3469 * here because of 6LoWPAN links, as there this function is
3470 * called from softirq and using normal spin lock could cause
3471 * deadlocks.
3472 */
3473 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003475 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003476
3477 flags &= ~ACL_START;
3478 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479 do {
3480 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003481
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003482 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02003483 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484
3485 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3486
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003487 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488 } while (list);
3489
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02003490 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003492}
3493
3494void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3495{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003496 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003497
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003498 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003499
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003500 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003502 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504
3505/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003506void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507{
3508 struct hci_dev *hdev = conn->hdev;
3509 struct hci_sco_hdr hdr;
3510
3511 BT_DBG("%s len %d", hdev->name, skb->len);
3512
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003513 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514 hdr.dlen = skb->len;
3515
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003516 skb_push(skb, HCI_SCO_HDR_SIZE);
3517 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003518 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003520 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003521
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003523 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525
3526/* ---- HCI TX task (outgoing data) ---- */
3527
3528/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003529static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3530 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003531{
3532 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003533 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003534 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003536 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003537 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003538
3539 rcu_read_lock();
3540
3541 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003542 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003544
3545 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3546 continue;
3547
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548 num++;
3549
3550 if (c->sent < min) {
3551 min = c->sent;
3552 conn = c;
3553 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003554
3555 if (hci_conn_num(hdev, type) == num)
3556 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 }
3558
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003559 rcu_read_unlock();
3560
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003562 int cnt, q;
3563
3564 switch (conn->type) {
3565 case ACL_LINK:
3566 cnt = hdev->acl_cnt;
3567 break;
3568 case SCO_LINK:
3569 case ESCO_LINK:
3570 cnt = hdev->sco_cnt;
3571 break;
3572 case LE_LINK:
3573 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3574 break;
3575 default:
3576 cnt = 0;
3577 BT_ERR("Unknown link type");
3578 }
3579
3580 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581 *quote = q ? q : 1;
3582 } else
3583 *quote = 0;
3584
3585 BT_DBG("conn %p quote %d", conn, *quote);
3586 return conn;
3587}
3588
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003589static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590{
3591 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003592 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593
Ville Tervobae1f5d92011-02-10 22:38:53 -03003594 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003596 rcu_read_lock();
3597
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003599 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003600 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003601 BT_ERR("%s killing stalled connection %pMR",
3602 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003603 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604 }
3605 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003606
3607 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608}
3609
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003610static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3611 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003612{
3613 struct hci_conn_hash *h = &hdev->conn_hash;
3614 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003615 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003616 struct hci_conn *conn;
3617 int cnt, q, conn_num = 0;
3618
3619 BT_DBG("%s", hdev->name);
3620
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003621 rcu_read_lock();
3622
3623 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003624 struct hci_chan *tmp;
3625
3626 if (conn->type != type)
3627 continue;
3628
3629 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3630 continue;
3631
3632 conn_num++;
3633
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003634 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003635 struct sk_buff *skb;
3636
3637 if (skb_queue_empty(&tmp->data_q))
3638 continue;
3639
3640 skb = skb_peek(&tmp->data_q);
3641 if (skb->priority < cur_prio)
3642 continue;
3643
3644 if (skb->priority > cur_prio) {
3645 num = 0;
3646 min = ~0;
3647 cur_prio = skb->priority;
3648 }
3649
3650 num++;
3651
3652 if (conn->sent < min) {
3653 min = conn->sent;
3654 chan = tmp;
3655 }
3656 }
3657
3658 if (hci_conn_num(hdev, type) == conn_num)
3659 break;
3660 }
3661
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003662 rcu_read_unlock();
3663
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003664 if (!chan)
3665 return NULL;
3666
3667 switch (chan->conn->type) {
3668 case ACL_LINK:
3669 cnt = hdev->acl_cnt;
3670 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003671 case AMP_LINK:
3672 cnt = hdev->block_cnt;
3673 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003674 case SCO_LINK:
3675 case ESCO_LINK:
3676 cnt = hdev->sco_cnt;
3677 break;
3678 case LE_LINK:
3679 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3680 break;
3681 default:
3682 cnt = 0;
3683 BT_ERR("Unknown link type");
3684 }
3685
3686 q = cnt / num;
3687 *quote = q ? q : 1;
3688 BT_DBG("chan %p quote %d", chan, *quote);
3689 return chan;
3690}
3691
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003692static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3693{
3694 struct hci_conn_hash *h = &hdev->conn_hash;
3695 struct hci_conn *conn;
3696 int num = 0;
3697
3698 BT_DBG("%s", hdev->name);
3699
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003700 rcu_read_lock();
3701
3702 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003703 struct hci_chan *chan;
3704
3705 if (conn->type != type)
3706 continue;
3707
3708 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3709 continue;
3710
3711 num++;
3712
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003713 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003714 struct sk_buff *skb;
3715
3716 if (chan->sent) {
3717 chan->sent = 0;
3718 continue;
3719 }
3720
3721 if (skb_queue_empty(&chan->data_q))
3722 continue;
3723
3724 skb = skb_peek(&chan->data_q);
3725 if (skb->priority >= HCI_PRIO_MAX - 1)
3726 continue;
3727
3728 skb->priority = HCI_PRIO_MAX - 1;
3729
3730 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003731 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003732 }
3733
3734 if (hci_conn_num(hdev, type) == num)
3735 break;
3736 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003737
3738 rcu_read_unlock();
3739
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003740}
3741
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003742static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3743{
3744 /* Calculate count of blocks used by this packet */
3745 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3746}
3747
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003748static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003750 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 /* ACL tx timeout must be longer than maximum
3752 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003753 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003754 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003755 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003756 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003757}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003759static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003760{
3761 unsigned int cnt = hdev->acl_cnt;
3762 struct hci_chan *chan;
3763 struct sk_buff *skb;
3764 int quote;
3765
3766 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003767
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003768 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003769 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003770 u32 priority = (skb_peek(&chan->data_q))->priority;
3771 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003772 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003773 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003774
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003775 /* Stop if priority has changed */
3776 if (skb->priority < priority)
3777 break;
3778
3779 skb = skb_dequeue(&chan->data_q);
3780
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003781 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003782 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003783
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003784 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785 hdev->acl_last_tx = jiffies;
3786
3787 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003788 chan->sent++;
3789 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790 }
3791 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003792
3793 if (cnt != hdev->acl_cnt)
3794 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795}
3796
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003797static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003798{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003799 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003800 struct hci_chan *chan;
3801 struct sk_buff *skb;
3802 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003803 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003804
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003805 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003806
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003807 BT_DBG("%s", hdev->name);
3808
3809 if (hdev->dev_type == HCI_AMP)
3810 type = AMP_LINK;
3811 else
3812 type = ACL_LINK;
3813
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003814 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003815 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003816 u32 priority = (skb_peek(&chan->data_q))->priority;
3817 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3818 int blocks;
3819
3820 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003821 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003822
3823 /* Stop if priority has changed */
3824 if (skb->priority < priority)
3825 break;
3826
3827 skb = skb_dequeue(&chan->data_q);
3828
3829 blocks = __get_blocks(hdev, skb);
3830 if (blocks > hdev->block_cnt)
3831 return;
3832
3833 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003834 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003835
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003836 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003837 hdev->acl_last_tx = jiffies;
3838
3839 hdev->block_cnt -= blocks;
3840 quote -= blocks;
3841
3842 chan->sent += blocks;
3843 chan->conn->sent += blocks;
3844 }
3845 }
3846
3847 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003848 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003849}
3850
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003851static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003852{
3853 BT_DBG("%s", hdev->name);
3854
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003855 /* No ACL link over BR/EDR controller */
3856 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3857 return;
3858
3859 /* No AMP link over AMP controller */
3860 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003861 return;
3862
3863 switch (hdev->flow_ctl_mode) {
3864 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3865 hci_sched_acl_pkt(hdev);
3866 break;
3867
3868 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3869 hci_sched_acl_blk(hdev);
3870 break;
3871 }
3872}
3873
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003875static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876{
3877 struct hci_conn *conn;
3878 struct sk_buff *skb;
3879 int quote;
3880
3881 BT_DBG("%s", hdev->name);
3882
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003883 if (!hci_conn_num(hdev, SCO_LINK))
3884 return;
3885
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3887 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3888 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003889 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890
3891 conn->sent++;
3892 if (conn->sent == ~0)
3893 conn->sent = 0;
3894 }
3895 }
3896}
3897
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003898static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003899{
3900 struct hci_conn *conn;
3901 struct sk_buff *skb;
3902 int quote;
3903
3904 BT_DBG("%s", hdev->name);
3905
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003906 if (!hci_conn_num(hdev, ESCO_LINK))
3907 return;
3908
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003909 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3910 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003911 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3912 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003913 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003914
3915 conn->sent++;
3916 if (conn->sent == ~0)
3917 conn->sent = 0;
3918 }
3919 }
3920}
3921
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003922static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003923{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003924 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003925 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003926 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003927
3928 BT_DBG("%s", hdev->name);
3929
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003930 if (!hci_conn_num(hdev, LE_LINK))
3931 return;
3932
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003933 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003934 /* LE tx timeout must be longer than maximum
3935 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003936 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003937 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003938 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003939 }
3940
3941 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003942 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003943 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003944 u32 priority = (skb_peek(&chan->data_q))->priority;
3945 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003946 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003947 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003948
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003949 /* Stop if priority has changed */
3950 if (skb->priority < priority)
3951 break;
3952
3953 skb = skb_dequeue(&chan->data_q);
3954
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003955 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003956 hdev->le_last_tx = jiffies;
3957
3958 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003959 chan->sent++;
3960 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003961 }
3962 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003963
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003964 if (hdev->le_pkts)
3965 hdev->le_cnt = cnt;
3966 else
3967 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003968
3969 if (cnt != tmp)
3970 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003971}
3972
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003973static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003975 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003976 struct sk_buff *skb;
3977
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003978 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003979 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003980
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003981 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann52de5992013-09-03 18:08:38 -07003982 /* Schedule queues and send stuff to HCI driver */
3983 hci_sched_acl(hdev);
3984 hci_sched_sco(hdev);
3985 hci_sched_esco(hdev);
3986 hci_sched_le(hdev);
3987 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003988
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989 /* Send next queued raw (unknown type) packet */
3990 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003991 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992}
3993
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003994/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995
3996/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003997static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998{
3999 struct hci_acl_hdr *hdr = (void *) skb->data;
4000 struct hci_conn *conn;
4001 __u16 handle, flags;
4002
4003 skb_pull(skb, HCI_ACL_HDR_SIZE);
4004
4005 handle = __le16_to_cpu(hdr->handle);
4006 flags = hci_flags(handle);
4007 handle = hci_handle(handle);
4008
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004009 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004010 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004011
4012 hdev->stat.acl_rx++;
4013
4014 hci_dev_lock(hdev);
4015 conn = hci_conn_hash_lookup_handle(hdev, handle);
4016 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004017
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004019 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004020
Linus Torvalds1da177e2005-04-16 15:20:36 -07004021 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004022 l2cap_recv_acldata(conn, skb, flags);
4023 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004025 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004026 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027 }
4028
4029 kfree_skb(skb);
4030}
4031
4032/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004033static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004034{
4035 struct hci_sco_hdr *hdr = (void *) skb->data;
4036 struct hci_conn *conn;
4037 __u16 handle;
4038
4039 skb_pull(skb, HCI_SCO_HDR_SIZE);
4040
4041 handle = __le16_to_cpu(hdr->handle);
4042
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004043 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044
4045 hdev->stat.sco_rx++;
4046
4047 hci_dev_lock(hdev);
4048 conn = hci_conn_hash_lookup_handle(hdev, handle);
4049 hci_dev_unlock(hdev);
4050
4051 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004053 sco_recv_scodata(conn, skb);
4054 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004056 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004057 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058 }
4059
4060 kfree_skb(skb);
4061}
4062
Johan Hedberg9238f362013-03-05 20:37:48 +02004063static bool hci_req_is_complete(struct hci_dev *hdev)
4064{
4065 struct sk_buff *skb;
4066
4067 skb = skb_peek(&hdev->cmd_q);
4068 if (!skb)
4069 return true;
4070
Johan Hedbergdb6e3e82015-03-30 23:21:02 +03004071 return bt_cb(skb)->req.start;
Johan Hedberg9238f362013-03-05 20:37:48 +02004072}
4073
Johan Hedberg42c6b122013-03-05 20:37:49 +02004074static void hci_resend_last(struct hci_dev *hdev)
4075{
4076 struct hci_command_hdr *sent;
4077 struct sk_buff *skb;
4078 u16 opcode;
4079
4080 if (!hdev->sent_cmd)
4081 return;
4082
4083 sent = (void *) hdev->sent_cmd->data;
4084 opcode = __le16_to_cpu(sent->opcode);
4085 if (opcode == HCI_OP_RESET)
4086 return;
4087
4088 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4089 if (!skb)
4090 return;
4091
4092 skb_queue_head(&hdev->cmd_q, skb);
4093 queue_work(hdev->workqueue, &hdev->cmd_work);
4094}
4095
Johan Hedberge62144872015-04-02 13:41:08 +03004096void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4097 hci_req_complete_t *req_complete,
4098 hci_req_complete_skb_t *req_complete_skb)
Johan Hedberg9238f362013-03-05 20:37:48 +02004099{
Johan Hedberg9238f362013-03-05 20:37:48 +02004100 struct sk_buff *skb;
4101 unsigned long flags;
4102
4103 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4104
Johan Hedberg42c6b122013-03-05 20:37:49 +02004105 /* If the completed command doesn't match the last one that was
4106 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02004107 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02004108 if (!hci_sent_cmd_data(hdev, opcode)) {
4109 /* Some CSR based controllers generate a spontaneous
4110 * reset complete event during init and any pending
4111 * command will never be completed. In such a case we
4112 * need to resend whatever was the last sent
4113 * command.
4114 */
4115 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4116 hci_resend_last(hdev);
4117
Johan Hedberg9238f362013-03-05 20:37:48 +02004118 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02004119 }
Johan Hedberg9238f362013-03-05 20:37:48 +02004120
4121 /* If the command succeeded and there's still more commands in
4122 * this request the request is not yet complete.
4123 */
4124 if (!status && !hci_req_is_complete(hdev))
4125 return;
4126
4127 /* If this was the last command in a request the complete
4128 * callback would be found in hdev->sent_cmd instead of the
4129 * command queue (hdev->cmd_q).
4130 */
Johan Hedberge62144872015-04-02 13:41:08 +03004131 if (bt_cb(hdev->sent_cmd)->req.complete) {
4132 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4133 return;
4134 }
Johan Hedberg53e21fb2013-07-27 14:11:14 -05004135
Johan Hedberge62144872015-04-02 13:41:08 +03004136 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4137 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4138 return;
Johan Hedberg9238f362013-03-05 20:37:48 +02004139 }
4140
4141 /* Remove all pending commands belonging to this request */
4142 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4143 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
Johan Hedbergdb6e3e82015-03-30 23:21:02 +03004144 if (bt_cb(skb)->req.start) {
Johan Hedberg9238f362013-03-05 20:37:48 +02004145 __skb_queue_head(&hdev->cmd_q, skb);
4146 break;
4147 }
4148
Johan Hedberge62144872015-04-02 13:41:08 +03004149 *req_complete = bt_cb(skb)->req.complete;
4150 *req_complete_skb = bt_cb(skb)->req.complete_skb;
Johan Hedberg9238f362013-03-05 20:37:48 +02004151 kfree_skb(skb);
4152 }
4153 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
Johan Hedberg9238f362013-03-05 20:37:48 +02004154}
4155
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004156static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004158 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159 struct sk_buff *skb;
4160
4161 BT_DBG("%s", hdev->name);
4162
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004164 /* Send copy to monitor */
4165 hci_send_to_monitor(hdev, skb);
4166
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167 if (atomic_read(&hdev->promisc)) {
4168 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004169 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 }
4171
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004172 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173 kfree_skb(skb);
4174 continue;
4175 }
4176
4177 if (test_bit(HCI_INIT, &hdev->flags)) {
4178 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004179 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180 case HCI_ACLDATA_PKT:
4181 case HCI_SCODATA_PKT:
4182 kfree_skb(skb);
4183 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004184 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 }
4186
4187 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004188 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004189 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004190 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004191 hci_event_packet(hdev, skb);
4192 break;
4193
4194 case HCI_ACLDATA_PKT:
4195 BT_DBG("%s ACL data packet", hdev->name);
4196 hci_acldata_packet(hdev, skb);
4197 break;
4198
4199 case HCI_SCODATA_PKT:
4200 BT_DBG("%s SCO data packet", hdev->name);
4201 hci_scodata_packet(hdev, skb);
4202 break;
4203
4204 default:
4205 kfree_skb(skb);
4206 break;
4207 }
4208 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209}
4210
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004211static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004213 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214 struct sk_buff *skb;
4215
Andrei Emeltchenko21047862012-07-10 15:27:47 +03004216 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4217 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02004220 if (atomic_read(&hdev->cmd_cnt)) {
4221 skb = skb_dequeue(&hdev->cmd_q);
4222 if (!skb)
4223 return;
4224
Wei Yongjun7585b972009-02-25 18:29:52 +08004225 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07004227 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004228 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004230 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004231 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004232 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02004233 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004234 schedule_delayed_work(&hdev->cmd_timer,
4235 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236 } else {
4237 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004238 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239 }
4240 }
4241}