blob: 476709bd068a474f7edcac83a4869849ccfb4b17 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg0857dd32014-12-19 13:40:20 +020040#include "hci_request.h"
Marcel Holtmann60c5f5f2014-12-20 16:05:13 +010041#include "hci_debugfs.h"
Johan Hedberg970c4e42014-02-18 10:19:33 +020042#include "smp.h"
43
Marcel Holtmannb78752c2010-08-08 23:06:53 -040044static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020045static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020046static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
Linus Torvalds1da177e2005-04-16 15:20:36 -070048/* HCI device list */
49LIST_HEAD(hci_dev_list);
50DEFINE_RWLOCK(hci_dev_list_lock);
51
52/* HCI callback list */
53LIST_HEAD(hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +020054DEFINE_MUTEX(hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Sasha Levin3df92b32012-05-27 22:36:56 +020056/* HCI ID Numbering */
57static DEFINE_IDA(hci_index_ida);
58
Marcel Holtmann899de762014-07-11 05:51:58 +020059/* ----- HCI requests ----- */
60
61#define HCI_REQ_DONE 0
62#define HCI_REQ_PEND 1
63#define HCI_REQ_CANCELED 2
64
65#define hci_req_lock(d) mutex_lock(&d->req_lock)
66#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Forward a device event (register/unregister/up/down) to the HCI
 * socket layer so monitoring sockets see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070075/* ---- HCI debugfs entries ---- */
76
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070077static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -070083 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070084 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87}
88
89static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
91{
92 struct hci_dev *hdev = file->private_data;
93 struct sk_buff *skb;
94 char buf[32];
95 size_t buf_size = min(count, (sizeof(buf)-1));
96 bool enable;
97 int err;
98
99 if (!test_bit(HCI_UP, &hdev->flags))
100 return -ENETDOWN;
101
102 if (copy_from_user(buf, user_buf, buf_size))
103 return -EFAULT;
104
105 buf[buf_size] = '\0';
106 if (strtobool(buf, &enable))
107 return -EINVAL;
108
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -0700109 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700110 return -EALREADY;
111
112 hci_req_lock(hdev);
113 if (enable)
114 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115 HCI_CMD_TIMEOUT);
116 else
117 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
118 HCI_CMD_TIMEOUT);
119 hci_req_unlock(hdev);
120
121 if (IS_ERR(skb))
122 return PTR_ERR(skb);
123
124 err = -bt_to_errno(skb->data[0]);
125 kfree_skb(skb);
126
127 if (err < 0)
128 return err;
129
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -0700130 hci_dev_change_flag(hdev, HCI_DUT_MODE);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700131
132 return count;
133}
134
135static const struct file_operations dut_mode_fops = {
136 .open = simple_open,
137 .read = dut_mode_read,
138 .write = dut_mode_write,
139 .llseek = default_llseek,
140};
141
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142/* ---- HCI requests ---- */
143
Johan Hedbergf60cb302015-04-02 13:41:09 +0300144static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
145 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200147 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148
149 if (hdev->req_status == HCI_REQ_PEND) {
150 hdev->req_result = result;
151 hdev->req_status = HCI_REQ_DONE;
Johan Hedbergf60cb302015-04-02 13:41:09 +0300152 if (skb)
153 hdev->req_skb = skb_get(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 wake_up_interruptible(&hdev->req_wait_q);
155 }
156}
157
158static void hci_req_cancel(struct hci_dev *hdev, int err)
159{
160 BT_DBG("%s err 0x%2.2x", hdev->name, err);
161
162 if (hdev->req_status == HCI_REQ_PEND) {
163 hdev->req_result = err;
164 hdev->req_status = HCI_REQ_CANCELED;
165 wake_up_interruptible(&hdev->req_wait_q);
166 }
167}
168
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300169struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300170 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300171{
172 DECLARE_WAITQUEUE(wait, current);
173 struct hci_request req;
Johan Hedbergf60cb302015-04-02 13:41:09 +0300174 struct sk_buff *skb;
Johan Hedberg75e84b72013-04-02 13:35:04 +0300175 int err = 0;
176
177 BT_DBG("%s", hdev->name);
178
179 hci_req_init(&req, hdev);
180
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300181 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300182
183 hdev->req_status = HCI_REQ_PEND;
184
Johan Hedberg75e84b72013-04-02 13:35:04 +0300185 add_wait_queue(&hdev->req_wait_q, &wait);
186 set_current_state(TASK_INTERRUPTIBLE);
187
Johan Hedbergf60cb302015-04-02 13:41:09 +0300188 err = hci_req_run_skb(&req, hci_req_sync_complete);
Chan-yeol Park039fada2014-10-31 14:23:06 +0900189 if (err < 0) {
190 remove_wait_queue(&hdev->req_wait_q, &wait);
Johan Hedberg22a3cea2014-11-19 13:16:41 +0200191 set_current_state(TASK_RUNNING);
Chan-yeol Park039fada2014-10-31 14:23:06 +0900192 return ERR_PTR(err);
193 }
194
Johan Hedberg75e84b72013-04-02 13:35:04 +0300195 schedule_timeout(timeout);
196
197 remove_wait_queue(&hdev->req_wait_q, &wait);
198
199 if (signal_pending(current))
200 return ERR_PTR(-EINTR);
201
202 switch (hdev->req_status) {
203 case HCI_REQ_DONE:
204 err = -bt_to_errno(hdev->req_result);
205 break;
206
207 case HCI_REQ_CANCELED:
208 err = -hdev->req_result;
209 break;
210
211 default:
212 err = -ETIMEDOUT;
213 break;
214 }
215
216 hdev->req_status = hdev->req_result = 0;
Johan Hedbergf60cb302015-04-02 13:41:09 +0300217 skb = hdev->req_skb;
218 hdev->req_skb = NULL;
Johan Hedberg75e84b72013-04-02 13:35:04 +0300219
220 BT_DBG("%s end: err %d", hdev->name, err);
221
Johan Hedbergf60cb302015-04-02 13:41:09 +0300222 if (err < 0) {
223 kfree_skb(skb);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300224 return ERR_PTR(err);
Johan Hedbergf60cb302015-04-02 13:41:09 +0300225 }
Johan Hedberg75e84b72013-04-02 13:35:04 +0300226
Johan Hedberg757aa0b2015-04-02 13:41:12 +0300227 if (!skb)
228 return ERR_PTR(-ENODATA);
229
230 return skb;
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300231}
232EXPORT_SYMBOL(__hci_cmd_sync_ev);
233
234struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300235 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300236{
237 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300238}
239EXPORT_SYMBOL(__hci_cmd_sync);
240
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200242static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200243 void (*func)(struct hci_request *req,
244 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200245 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700246{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200247 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248 DECLARE_WAITQUEUE(wait, current);
249 int err = 0;
250
251 BT_DBG("%s start", hdev->name);
252
Johan Hedberg42c6b122013-03-05 20:37:49 +0200253 hci_req_init(&req, hdev);
254
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255 hdev->req_status = HCI_REQ_PEND;
256
Johan Hedberg42c6b122013-03-05 20:37:49 +0200257 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200258
Chan-yeol Park039fada2014-10-31 14:23:06 +0900259 add_wait_queue(&hdev->req_wait_q, &wait);
260 set_current_state(TASK_INTERRUPTIBLE);
261
Johan Hedbergf60cb302015-04-02 13:41:09 +0300262 err = hci_req_run_skb(&req, hci_req_sync_complete);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200263 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200264 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300265
Chan-yeol Park039fada2014-10-31 14:23:06 +0900266 remove_wait_queue(&hdev->req_wait_q, &wait);
Johan Hedberg22a3cea2014-11-19 13:16:41 +0200267 set_current_state(TASK_RUNNING);
Chan-yeol Park039fada2014-10-31 14:23:06 +0900268
Andre Guedes920c8302013-03-08 11:20:15 -0300269 /* ENODATA means the HCI request command queue is empty.
270 * This can happen when a request with conditionals doesn't
271 * trigger any commands to be sent. This is normal behavior
272 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200273 */
Andre Guedes920c8302013-03-08 11:20:15 -0300274 if (err == -ENODATA)
275 return 0;
276
277 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200278 }
279
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280 schedule_timeout(timeout);
281
282 remove_wait_queue(&hdev->req_wait_q, &wait);
283
284 if (signal_pending(current))
285 return -EINTR;
286
287 switch (hdev->req_status) {
288 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700289 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290 break;
291
292 case HCI_REQ_CANCELED:
293 err = -hdev->req_result;
294 break;
295
296 default:
297 err = -ETIMEDOUT;
298 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700299 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300
Johan Hedberga5040ef2011-01-10 13:28:59 +0200301 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302
303 BT_DBG("%s end: err %d", hdev->name, err);
304
305 return err;
306}
307
Johan Hedberg01178cd2013-03-05 20:37:41 +0200308static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200309 void (*req)(struct hci_request *req,
310 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200311 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312{
313 int ret;
314
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200315 if (!test_bit(HCI_UP, &hdev->flags))
316 return -ENETDOWN;
317
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318 /* Serialize all requests */
319 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200320 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321 hci_req_unlock(hdev);
322
323 return ret;
324}
325
Johan Hedberg42c6b122013-03-05 20:37:49 +0200326static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200328 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329
330 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200331 set_bit(HCI_RESET, &req->hdev->flags);
332 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700333}
334
Johan Hedberg42c6b122013-03-05 20:37:49 +0200335static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200337 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200338
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200340 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200342 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200343 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200344
345 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200346 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347}
348
Johan Hedberg0af801b2015-02-17 15:05:21 +0200349static void amp_init1(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200350{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200351 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200352
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200353 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200354 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300355
Marcel Holtmannf6996cf2013-10-07 02:31:39 -0700356 /* Read Local Supported Commands */
357 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
358
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300359 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200360 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300361
362 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200363 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700364
Marcel Holtmannf38ba942013-10-07 03:55:53 -0700365 /* Read Flow Control Mode */
366 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
367
Marcel Holtmann7528ca12013-10-07 03:55:52 -0700368 /* Read Location Data */
369 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200370}
371
Johan Hedberg0af801b2015-02-17 15:05:21 +0200372static void amp_init2(struct hci_request *req)
373{
374 /* Read Local Supported Features. Not all AMP controllers
375 * support this so it's placed conditionally in the second
376 * stage init.
377 */
378 if (req->hdev->commands[14] & 0x20)
379 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
380}
381
Johan Hedberg42c6b122013-03-05 20:37:49 +0200382static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200383{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200384 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200385
386 BT_DBG("%s %ld", hdev->name, opt);
387
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300388 /* Reset */
389 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200390 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300391
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200392 switch (hdev->dev_type) {
393 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200394 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200395 break;
396
397 case HCI_AMP:
Johan Hedberg0af801b2015-02-17 15:05:21 +0200398 amp_init1(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200399 break;
400
401 default:
402 BT_ERR("Unknown device type %d", hdev->dev_type);
403 break;
404 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200405}
406
Johan Hedberg42c6b122013-03-05 20:37:49 +0200407static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200408{
Johan Hedberg2177bab2013-03-05 20:37:43 +0200409 __le16 param;
410 __u8 flt_type;
411
412 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200413 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200414
415 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200416 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200417
418 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200419 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200420
421 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200422 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200423
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -0700424 /* Read Number of Supported IAC */
425 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
426
Marcel Holtmann4b836f32013-10-14 14:06:36 -0700427 /* Read Current IAC LAP */
428 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
429
Johan Hedberg2177bab2013-03-05 20:37:43 +0200430 /* Clear Event Filters */
431 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200432 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200433
434 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700435 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200436 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200437}
438
Johan Hedberg42c6b122013-03-05 20:37:49 +0200439static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200440{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300441 struct hci_dev *hdev = req->hdev;
442
Johan Hedberg2177bab2013-03-05 20:37:43 +0200443 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200444 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200445
446 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200447 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200448
Marcel Holtmann747d3f02014-02-27 20:37:29 -0800449 /* Read LE Supported States */
450 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
451
Johan Hedberg2177bab2013-03-05 20:37:43 +0200452 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200453 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200454
Marcel Holtmann747d3f02014-02-27 20:37:29 -0800455 /* Clear LE White List */
456 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +0300457
458 /* LE-only controllers have LE implicitly enabled */
459 if (!lmp_bredr_capable(hdev))
Marcel Holtmanna1536da2015-03-13 02:11:01 -0700460 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200461}
462
Johan Hedberg42c6b122013-03-05 20:37:49 +0200463static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200464{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200465 struct hci_dev *hdev = req->hdev;
466
Johan Hedberg2177bab2013-03-05 20:37:43 +0200467 /* The second byte is 0xff instead of 0x9f (two reserved bits
468 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
469 * command otherwise.
470 */
471 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
472
473 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
474 * any event mask for pre 1.2 devices.
475 */
476 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
477 return;
478
479 if (lmp_bredr_capable(hdev)) {
480 events[4] |= 0x01; /* Flow Specification Complete */
481 events[4] |= 0x02; /* Inquiry Result with RSSI */
482 events[4] |= 0x04; /* Read Remote Extended Features Complete */
483 events[5] |= 0x08; /* Synchronous Connection Complete */
484 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700485 } else {
486 /* Use a different default for LE-only devices */
487 memset(events, 0, sizeof(events));
488 events[0] |= 0x10; /* Disconnection Complete */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700489 events[1] |= 0x08; /* Read Remote Version Information Complete */
490 events[1] |= 0x20; /* Command Complete */
491 events[1] |= 0x40; /* Command Status */
492 events[1] |= 0x80; /* Hardware Error */
493 events[2] |= 0x04; /* Number of Completed Packets */
494 events[3] |= 0x02; /* Data Buffer Overflow */
Marcel Holtmann0da71f12014-07-12 23:36:16 +0200495
496 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
497 events[0] |= 0x80; /* Encryption Change */
498 events[5] |= 0x80; /* Encryption Key Refresh Complete */
499 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200500 }
501
502 if (lmp_inq_rssi_capable(hdev))
503 events[4] |= 0x02; /* Inquiry Result with RSSI */
504
505 if (lmp_sniffsubr_capable(hdev))
506 events[5] |= 0x20; /* Sniff Subrating */
507
508 if (lmp_pause_enc_capable(hdev))
509 events[5] |= 0x80; /* Encryption Key Refresh Complete */
510
511 if (lmp_ext_inq_capable(hdev))
512 events[5] |= 0x40; /* Extended Inquiry Result */
513
514 if (lmp_no_flush_capable(hdev))
515 events[7] |= 0x01; /* Enhanced Flush Complete */
516
517 if (lmp_lsto_capable(hdev))
518 events[6] |= 0x80; /* Link Supervision Timeout Changed */
519
520 if (lmp_ssp_capable(hdev)) {
521 events[6] |= 0x01; /* IO Capability Request */
522 events[6] |= 0x02; /* IO Capability Response */
523 events[6] |= 0x04; /* User Confirmation Request */
524 events[6] |= 0x08; /* User Passkey Request */
525 events[6] |= 0x10; /* Remote OOB Data Request */
526 events[6] |= 0x20; /* Simple Pairing Complete */
527 events[7] |= 0x04; /* User Passkey Notification */
528 events[7] |= 0x08; /* Keypress Notification */
529 events[7] |= 0x10; /* Remote Host Supported
530 * Features Notification
531 */
532 }
533
534 if (lmp_le_capable(hdev))
535 events[7] |= 0x20; /* LE Meta-Event */
536
Johan Hedberg42c6b122013-03-05 20:37:49 +0200537 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200538}
539
Johan Hedberg42c6b122013-03-05 20:37:49 +0200540static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200541{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200542 struct hci_dev *hdev = req->hdev;
543
Johan Hedberg0af801b2015-02-17 15:05:21 +0200544 if (hdev->dev_type == HCI_AMP)
545 return amp_init2(req);
546
Johan Hedberg2177bab2013-03-05 20:37:43 +0200547 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200548 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +0300549 else
Marcel Holtmanna358dc12015-03-13 02:11:02 -0700550 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200551
552 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200553 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200554
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100555 /* All Bluetooth 1.2 and later controllers should support the
556 * HCI command for reading the local supported commands.
557 *
558 * Unfortunately some controllers indicate Bluetooth 1.2 support,
559 * but do not have support for this command. If that is the case,
560 * the driver can quirk the behavior and skip reading the local
561 * supported commands.
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300562 */
Marcel Holtmann0f3adea2014-12-26 04:42:34 +0100563 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
564 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200565 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200566
567 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -0700568 /* When SSP is available, then the host features page
569 * should also be available as well. However some
570 * controllers list the max_page as 0 as long as SSP
571 * has not been enabled. To achieve proper debugging
572 * output, force the minimum max_page to 1 at least.
573 */
574 hdev->max_page = 0x01;
575
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700576 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200577 u8 mode = 0x01;
Marcel Holtmann574ea3c2015-01-22 11:15:20 -0800578
Johan Hedberg42c6b122013-03-05 20:37:49 +0200579 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
580 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200581 } else {
582 struct hci_cp_write_eir cp;
583
584 memset(hdev->eir, 0, sizeof(hdev->eir));
585 memset(&cp, 0, sizeof(cp));
586
Johan Hedberg42c6b122013-03-05 20:37:49 +0200587 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200588 }
589 }
590
Marcel Holtmann043ec9b2015-01-02 23:35:19 -0800591 if (lmp_inq_rssi_capable(hdev) ||
592 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
Marcel Holtmann04422da2015-01-02 23:35:18 -0800593 u8 mode;
594
595 /* If Extended Inquiry Result events are supported, then
596 * they are clearly preferred over Inquiry Result with RSSI
597 * events.
598 */
599 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
600
601 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
602 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200603
604 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200605 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200606
607 if (lmp_ext_feat_capable(hdev)) {
608 struct hci_cp_read_local_ext_features cp;
609
610 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200611 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
612 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200613 }
614
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700615 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200616 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200617 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
618 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200619 }
620}
621
Johan Hedberg42c6b122013-03-05 20:37:49 +0200622static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200623{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200624 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200625 struct hci_cp_write_def_link_policy cp;
626 u16 link_policy = 0;
627
628 if (lmp_rswitch_capable(hdev))
629 link_policy |= HCI_LP_RSWITCH;
630 if (lmp_hold_capable(hdev))
631 link_policy |= HCI_LP_HOLD;
632 if (lmp_sniff_capable(hdev))
633 link_policy |= HCI_LP_SNIFF;
634 if (lmp_park_capable(hdev))
635 link_policy |= HCI_LP_PARK;
636
637 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200638 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200639}
640
Johan Hedberg42c6b122013-03-05 20:37:49 +0200641static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200642{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200643 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200644 struct hci_cp_write_le_host_supported cp;
645
Johan Hedbergc73eee92013-04-19 18:35:21 +0300646 /* LE-only devices do not support explicit enablement */
647 if (!lmp_bredr_capable(hdev))
648 return;
649
Johan Hedberg2177bab2013-03-05 20:37:43 +0200650 memset(&cp, 0, sizeof(cp));
651
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700652 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2177bab2013-03-05 20:37:43 +0200653 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +0200654 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200655 }
656
657 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200658 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
659 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200660}
661
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300662static void hci_set_event_mask_page_2(struct hci_request *req)
663{
664 struct hci_dev *hdev = req->hdev;
665 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
666
667 /* If Connectionless Slave Broadcast master role is supported
668 * enable all necessary events for it.
669 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800670 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300671 events[1] |= 0x40; /* Triggered Clock Capture */
672 events[1] |= 0x80; /* Synchronization Train Complete */
673 events[2] |= 0x10; /* Slave Page Response Timeout */
674 events[2] |= 0x20; /* CSB Channel Map Change */
675 }
676
677 /* If Connectionless Slave Broadcast slave role is supported
678 * enable all necessary events for it.
679 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -0800680 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300681 events[2] |= 0x01; /* Synchronization Train Received */
682 events[2] |= 0x02; /* CSB Receive */
683 events[2] |= 0x04; /* CSB Timeout */
684 events[2] |= 0x08; /* Truncated Page Complete */
685 }
686
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800687 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +0200688 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -0800689 events[2] |= 0x80;
690
Johan Hedbergd62e6d62013-09-13 11:40:02 +0300691 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
692}
693
/* Third stage of controller initialization: set up the event mask,
 * stored link keys, link policy, page scan parameters and all LE
 * specific settings, then read any local feature pages beyond page 1.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Read all stored link keys if the HCI command is supported */
	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	/* Configure default link policy if the HCI command is supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	/* Read page scan activity if the HCI command is supported */
	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
795
/* Fourth and final stage of controller initialization: delete stale
 * stored link keys, set event mask page 2, read codec and MWS transport
 * information and enable Secure Connections when supported.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
848
/* Run the full staged initialization sequence for a configured
 * controller.
 *
 * Stages 1 and 2 run for all controller types; stages 3 and 4 run only
 * for HCI_BREDR type devices (AMP controllers need just the first two).
 * Returns 0 on success or a negative error code from __hci_req_sync().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
910
/* Minimal init request used for unconfigured controllers: optionally
 * reset, then read just enough information (local version and, when the
 * driver can reprogram the address, the BD address) to identify the
 * controller.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless the driver quirk says the reset is done on close */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address, but only when the driver provides a set_bdaddr
	 * callback (i.e. the address can be reprogrammed)
	 */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
928
929static int __hci_unconf_init(struct hci_dev *hdev)
930{
931 int err;
932
Marcel Holtmanncc78b442014-07-06 13:43:20 +0200933 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
934 return 0;
935
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +0200936 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
937 if (err < 0)
938 return err;
939
940 return 0;
941}
942
Johan Hedberg42c6b122013-03-05 20:37:49 +0200943static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944{
945 __u8 scan = opt;
946
Johan Hedberg42c6b122013-03-05 20:37:49 +0200947 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948
949 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200950 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951}
952
Johan Hedberg42c6b122013-03-05 20:37:49 +0200953static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954{
955 __u8 auth = opt;
956
Johan Hedberg42c6b122013-03-05 20:37:49 +0200957 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958
959 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200960 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700961}
962
Johan Hedberg42c6b122013-03-05 20:37:49 +0200963static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964{
965 __u8 encrypt = opt;
966
Johan Hedberg42c6b122013-03-05 20:37:49 +0200967 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700968
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200969 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200970 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971}
972
Johan Hedberg42c6b122013-03-05 20:37:49 +0200973static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200974{
975 __le16 policy = cpu_to_le16(opt);
976
Johan Hedberg42c6b122013-03-05 20:37:49 +0200977 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200978
979 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200980 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200981}
982
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900983/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 * Device is held on return. */
985struct hci_dev *hci_dev_get(int index)
986{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200987 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988
989 BT_DBG("%d", index);
990
991 if (index < 0)
992 return NULL;
993
994 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200995 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996 if (d->id == index) {
997 hdev = hci_dev_hold(d);
998 break;
999 }
1000 }
1001 read_unlock(&hci_dev_list_lock);
1002 return hdev;
1003}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004
1005/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001006
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001007bool hci_discovery_active(struct hci_dev *hdev)
1008{
1009 struct discovery_state *discov = &hdev->discovery;
1010
Andre Guedes6fbe1952012-02-03 17:47:58 -03001011 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001012 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001013 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001014 return true;
1015
Andre Guedes6fbe1952012-02-03 17:47:58 -03001016 default:
1017 return false;
1018 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001019}
1020
Johan Hedbergff9ef572012-01-04 14:23:45 +02001021void hci_discovery_set_state(struct hci_dev *hdev, int state)
1022{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001023 int old_state = hdev->discovery.state;
1024
Johan Hedbergff9ef572012-01-04 14:23:45 +02001025 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1026
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001027 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001028 return;
1029
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001030 hdev->discovery.state = state;
1031
Johan Hedbergff9ef572012-01-04 14:23:45 +02001032 switch (state) {
1033 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001034 hci_update_background_scan(hdev);
1035
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001036 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001037 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001038 break;
1039 case DISCOVERY_STARTING:
1040 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001041 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001042 mgmt_discovering(hdev, 1);
1043 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001044 case DISCOVERY_RESOLVING:
1045 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001046 case DISCOVERY_STOPPING:
1047 break;
1048 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001049}
1050
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001051void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052{
Johan Hedberg30883512012-01-04 14:16:21 +02001053 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001054 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055
Johan Hedberg561aafb2012-01-04 13:31:59 +02001056 list_for_each_entry_safe(p, n, &cache->all, all) {
1057 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001058 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001059 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001060
1061 INIT_LIST_HEAD(&cache->unknown);
1062 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063}
1064
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001065struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1066 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067{
Johan Hedberg30883512012-01-04 14:16:21 +02001068 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 struct inquiry_entry *e;
1070
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001071 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072
Johan Hedberg561aafb2012-01-04 13:31:59 +02001073 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001075 return e;
1076 }
1077
1078 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079}
1080
Johan Hedberg561aafb2012-01-04 13:31:59 +02001081struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001082 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001083{
Johan Hedberg30883512012-01-04 14:16:21 +02001084 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001085 struct inquiry_entry *e;
1086
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001087 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001088
1089 list_for_each_entry(e, &cache->unknown, list) {
1090 if (!bacmp(&e->data.bdaddr, bdaddr))
1091 return e;
1092 }
1093
1094 return NULL;
1095}
1096
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001097struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001098 bdaddr_t *bdaddr,
1099 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001100{
1101 struct discovery_state *cache = &hdev->discovery;
1102 struct inquiry_entry *e;
1103
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001104 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001105
1106 list_for_each_entry(e, &cache->resolve, list) {
1107 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1108 return e;
1109 if (!bacmp(&e->data.bdaddr, bdaddr))
1110 return e;
1111 }
1112
1113 return NULL;
1114}
1115
/* Re-insert @ie into the resolve list so that entries stay ordered by
 * ascending abs(rssi) (i.e. stronger signals first). Entries with a
 * pending name request are never displaced towards the back.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Remove the entry before searching for its new position */
	list_del(&ie->list);

	/* Walk forward until an entry with an equal or larger abs(rssi)
	 * is found; pos tracks the node to insert after. NAME_PENDING
	 * entries are skipped so ie always lands behind them.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1134
/* Add a new inquiry result to the discovery cache, or refresh an
 * existing entry for the same address.
 *
 * Returns MGMT_DEV_FOUND_* flags: LEGACY_PAIRING when no SSP support
 * was observed (now or previously) and CONFIRM_NAME when the remote
 * name still needs confirmation from userspace.
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Drop any remote OOB data stored for this device */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* If the RSSI changed while the name is still needed,
		 * reposition the entry in the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Cannot track the entry ourselves, so ask userspace
		 * to confirm the name instead.
		 */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN (and drop from the sub-list) when the
	 * caller now knows the name and no name request is pending.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1196
1197static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1198{
Johan Hedberg30883512012-01-04 14:16:21 +02001199 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 struct inquiry_info *info = (struct inquiry_info *) buf;
1201 struct inquiry_entry *e;
1202 int copied = 0;
1203
Johan Hedberg561aafb2012-01-04 13:31:59 +02001204 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001206
1207 if (copied >= num)
1208 break;
1209
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 bacpy(&info->bdaddr, &data->bdaddr);
1211 info->pscan_rep_mode = data->pscan_rep_mode;
1212 info->pscan_period_mode = data->pscan_period_mode;
1213 info->pscan_mode = data->pscan_mode;
1214 memcpy(info->dev_class, data->dev_class, 3);
1215 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001216
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001218 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 }
1220
1221 BT_DBG("cache %p, copied %d", cache, copied);
1222 return copied;
1223}
1224
Johan Hedberg42c6b122013-03-05 20:37:49 +02001225static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226{
1227 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001228 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 struct hci_cp_inquiry cp;
1230
1231 BT_DBG("%s", hdev->name);
1232
1233 if (test_bit(HCI_INQUIRY, &hdev->flags))
1234 return;
1235
1236 /* Start Inquiry */
1237 memcpy(&cp.lap, &ir->lap, 3);
1238 cp.length = ir->length;
1239 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001240 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241}
1242
1243int hci_inquiry(void __user *arg)
1244{
1245 __u8 __user *ptr = arg;
1246 struct hci_inquiry_req ir;
1247 struct hci_dev *hdev;
1248 int err = 0, do_inquiry = 0, max_rsp;
1249 long timeo;
1250 __u8 *buf;
1251
1252 if (copy_from_user(&ir, ptr, sizeof(ir)))
1253 return -EFAULT;
1254
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001255 hdev = hci_dev_get(ir.dev_id);
1256 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 return -ENODEV;
1258
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001259 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001260 err = -EBUSY;
1261 goto done;
1262 }
1263
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001264 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001265 err = -EOPNOTSUPP;
1266 goto done;
1267 }
1268
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07001269 if (hdev->dev_type != HCI_BREDR) {
1270 err = -EOPNOTSUPP;
1271 goto done;
1272 }
1273
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001274 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
Johan Hedberg56f87902013-10-02 13:43:13 +03001275 err = -EOPNOTSUPP;
1276 goto done;
1277 }
1278
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001279 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001280 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001281 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001282 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 do_inquiry = 1;
1284 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001285 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286
Marcel Holtmann04837f62006-07-03 10:02:33 +02001287 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001288
1289 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02001290 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1291 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001292 if (err < 0)
1293 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001294
1295 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1296 * cleared). If it is interrupted by a signal, return -EINTR.
1297 */
NeilBrown74316202014-07-07 15:16:04 +10001298 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03001299 TASK_INTERRUPTIBLE))
1300 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001301 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001303 /* for unlimited number of responses we will use buffer with
1304 * 255 entries
1305 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1307
1308 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1309 * copy it to the user space.
1310 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001311 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001312 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 err = -ENOMEM;
1314 goto done;
1315 }
1316
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001317 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001319 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320
1321 BT_DBG("num_rsp %d", ir.num_rsp);
1322
1323 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1324 ptr += sizeof(ir);
1325 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001326 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001328 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 err = -EFAULT;
1330
1331 kfree(buf);
1332
1333done:
1334 hci_dev_put(hdev);
1335 return err;
1336}
1337
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001338static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 int ret = 0;
1341
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 BT_DBG("%s %p", hdev->name, hdev);
1343
1344 hci_req_lock(hdev);
1345
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001346 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Johan Hovold94324962012-03-15 14:48:41 +01001347 ret = -ENODEV;
1348 goto done;
1349 }
1350
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001351 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1352 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001353 /* Check for rfkill but allow the HCI setup stage to
1354 * proceed (which in itself doesn't cause any RF activity).
1355 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001356 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001357 ret = -ERFKILL;
1358 goto done;
1359 }
1360
1361 /* Check for valid public address or a configured static
1362 * random adddress, but let the HCI setup proceed to
1363 * be able to determine if there is a public address
1364 * or not.
1365 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001366 * In case of user channel usage, it is not important
1367 * if a public address or static random address is
1368 * available.
1369 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001370 * This check is only valid for BR/EDR controllers
1371 * since AMP controllers do not have an address.
1372 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001373 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmannc6beca02014-02-17 09:21:19 -08001374 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001375 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1376 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1377 ret = -EADDRNOTAVAIL;
1378 goto done;
1379 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001380 }
1381
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 if (test_bit(HCI_UP, &hdev->flags)) {
1383 ret = -EALREADY;
1384 goto done;
1385 }
1386
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 if (hdev->open(hdev)) {
1388 ret = -EIO;
1389 goto done;
1390 }
1391
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001392 atomic_set(&hdev->cmd_cnt, 1);
1393 set_bit(HCI_INIT, &hdev->flags);
1394
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001395 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001396 if (hdev->setup)
1397 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001398
Marcel Holtmannaf202f82014-07-04 17:23:34 +02001399 /* The transport driver can set these quirks before
1400 * creating the HCI device or in its setup callback.
1401 *
1402 * In case any of them is set, the controller has to
1403 * start up as unconfigured.
1404 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02001405 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1406 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001407 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001408
1409 /* For an unconfigured controller it is required to
1410 * read at least the version information provided by
1411 * the Read Local Version Information command.
1412 *
1413 * If the set_bdaddr driver callback is provided, then
1414 * also the original Bluetooth public device address
1415 * will be read using the Read BD Address command.
1416 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001417 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001418 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02001419 }
1420
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001421 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
Marcel Holtmann9713c172014-07-06 12:11:15 +02001422 /* If public address change is configured, ensure that
1423 * the address gets programmed. If the driver does not
1424 * support changing the public address, fail the power
1425 * on procedure.
1426 */
1427 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1428 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02001429 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1430 else
1431 ret = -EADDRNOTAVAIL;
1432 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001433
1434 if (!ret) {
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001435 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1436 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001437 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 }
1439
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001440 clear_bit(HCI_INIT, &hdev->flags);
1441
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 if (!ret) {
1443 hci_dev_hold(hdev);
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001444 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 set_bit(HCI_UP, &hdev->flags);
1446 hci_notify(hdev, HCI_DEV_UP);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001447 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1448 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1449 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1450 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001451 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001452 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001453 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001454 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001455 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001456 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001458 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001459 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001460 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461
1462 skb_queue_purge(&hdev->cmd_q);
1463 skb_queue_purge(&hdev->rx_q);
1464
1465 if (hdev->flush)
1466 hdev->flush(hdev);
1467
1468 if (hdev->sent_cmd) {
1469 kfree_skb(hdev->sent_cmd);
1470 hdev->sent_cmd = NULL;
1471 }
1472
1473 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001474 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 }
1476
1477done:
1478 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 return ret;
1480}
1481
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001482/* ---- HCI ioctl helpers ---- */
1483
/* Power on an HCI device (HCIDEVUP ioctl path and user channel).
 *
 * Resolves the device index, enforces the policy checks that only
 * apply on this path, and then defers to hci_dev_do_open() for the
 * actual initialization sequence.
 *
 * Returns 0 on success or a negative errno (-ENODEV if the index is
 * unknown, -EOPNOTSUPP for unconfigured non-user-channel devices, or
 * whatever hci_dev_do_open() reports).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1538
Johan Hedbergd7347f32014-07-04 12:37:23 +03001539/* This function requires the caller holds hdev->lock */
1540static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1541{
1542 struct hci_conn_params *p;
1543
Johan Hedbergf161dd42014-08-15 21:06:54 +03001544 list_for_each_entry(p, &hdev->le_conn_params, list) {
1545 if (p->conn) {
1546 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03001547 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001548 p->conn = NULL;
1549 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001550 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001551 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03001552
1553 BT_DBG("All LE pending actions cleared");
1554}
1555
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556static int hci_dev_do_close(struct hci_dev *hdev)
1557{
1558 BT_DBG("%s %p", hdev->name, hdev);
1559
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001560 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
Tedd Ho-Jeong Ana44fecb2015-02-13 09:20:50 -08001561 /* Execute vendor specific shutdown routine */
1562 if (hdev->shutdown)
1563 hdev->shutdown(hdev);
1564 }
1565
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001566 cancel_delayed_work(&hdev->power_off);
1567
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568 hci_req_cancel(hdev, ENODEV);
1569 hci_req_lock(hdev);
1570
1571 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001572 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 hci_req_unlock(hdev);
1574 return 0;
1575 }
1576
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001577 /* Flush RX and TX works */
1578 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001579 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001581 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001582 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001583 hdev->discov_timeout = 0;
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001584 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1585 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001586 }
1587
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001588 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
Johan Hedberg7d785252011-12-15 00:47:39 +02001589 cancel_delayed_work(&hdev->service_cache);
1590
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001591 cancel_delayed_work_sync(&hdev->le_scan_disable);
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08001592 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedberg4518bb02014-02-24 20:35:07 +02001593
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001594 if (hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg4518bb02014-02-24 20:35:07 +02001595 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001596
Johan Hedberg76727c02014-11-18 09:00:14 +02001597 /* Avoid potential lockdep warnings from the *_flush() calls by
1598 * ensuring the workqueue is empty up front.
1599 */
1600 drain_workqueue(hdev->workqueue);
1601
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001602 hci_dev_lock(hdev);
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001603
Johan Hedberg8f502f82015-01-28 19:56:02 +02001604 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1605
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001606 if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
Johan Hedberg1aeb9c62014-12-11 21:45:46 +02001607 if (hdev->dev_type == HCI_BREDR)
1608 mgmt_powered(hdev, 0);
1609 }
1610
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001611 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03001612 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03001613 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001614 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
Marcel Holtmann64dae962015-01-28 14:10:28 -08001616 smp_unregister(hdev);
1617
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 hci_notify(hdev, HCI_DEV_DOWN);
1619
1620 if (hdev->flush)
1621 hdev->flush(hdev);
1622
1623 /* Reset device */
1624 skb_queue_purge(&hdev->cmd_q);
1625 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001626 if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1627 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001628 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001630 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 clear_bit(HCI_INIT, &hdev->flags);
1632 }
1633
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001634 /* flush cmd work */
1635 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636
1637 /* Drop queues */
1638 skb_queue_purge(&hdev->rx_q);
1639 skb_queue_purge(&hdev->cmd_q);
1640 skb_queue_purge(&hdev->raw_q);
1641
1642 /* Drop last sent command */
1643 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02001644 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 kfree_skb(hdev->sent_cmd);
1646 hdev->sent_cmd = NULL;
1647 }
1648
1649 /* After this point our queues are empty
1650 * and no tasks are scheduled. */
1651 hdev->close(hdev);
1652
Johan Hedberg35b973c2013-03-15 17:06:59 -05001653 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001654 hdev->flags &= BIT(HCI_RAW);
Marcel Holtmanneacb44d2015-03-13 09:04:17 -07001655 hci_dev_clear_volatile_flags(hdev);
Johan Hedberg35b973c2013-03-15 17:06:59 -05001656
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001657 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07001658 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001659
Johan Hedberge59fda82012-02-22 18:11:53 +02001660 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001661 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001662 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02001663
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 hci_req_unlock(hdev);
1665
1666 hci_dev_put(hdev);
1667 return 0;
1668}
1669
1670int hci_dev_close(__u16 dev)
1671{
1672 struct hci_dev *hdev;
1673 int err;
1674
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001675 hdev = hci_dev_get(dev);
1676 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001678
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001679 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001680 err = -EBUSY;
1681 goto done;
1682 }
1683
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001684 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001685 cancel_delayed_work(&hdev->power_off);
1686
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001688
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001689done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 hci_dev_put(hdev);
1691 return err;
1692}
1693
Marcel Holtmann5c912492015-01-28 11:53:05 -08001694static int hci_dev_do_reset(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695{
Marcel Holtmann5c912492015-01-28 11:53:05 -08001696 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
Marcel Holtmann5c912492015-01-28 11:53:05 -08001698 BT_DBG("%s %p", hdev->name, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
1700 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 /* Drop queues */
1703 skb_queue_purge(&hdev->rx_q);
1704 skb_queue_purge(&hdev->cmd_q);
1705
Johan Hedberg76727c02014-11-18 09:00:14 +02001706 /* Avoid potential lockdep warnings from the *_flush() calls by
1707 * ensuring the workqueue is empty up front.
1708 */
1709 drain_workqueue(hdev->workqueue);
1710
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001711 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001712 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001714 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
1716 if (hdev->flush)
1717 hdev->flush(hdev);
1718
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001719 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001720 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001722 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 return ret;
1726}
1727
Marcel Holtmann5c912492015-01-28 11:53:05 -08001728int hci_dev_reset(__u16 dev)
1729{
1730 struct hci_dev *hdev;
1731 int err;
1732
1733 hdev = hci_dev_get(dev);
1734 if (!hdev)
1735 return -ENODEV;
1736
1737 if (!test_bit(HCI_UP, &hdev->flags)) {
1738 err = -ENETDOWN;
1739 goto done;
1740 }
1741
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001742 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001743 err = -EBUSY;
1744 goto done;
1745 }
1746
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001747 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmann5c912492015-01-28 11:53:05 -08001748 err = -EOPNOTSUPP;
1749 goto done;
1750 }
1751
1752 err = hci_dev_do_reset(hdev);
1753
1754done:
1755 hci_dev_put(hdev);
1756 return err;
1757}
1758
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759int hci_dev_reset_stat(__u16 dev)
1760{
1761 struct hci_dev *hdev;
1762 int ret = 0;
1763
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001764 hdev = hci_dev_get(dev);
1765 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 return -ENODEV;
1767
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001768 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001769 ret = -EBUSY;
1770 goto done;
1771 }
1772
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001773 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02001774 ret = -EOPNOTSUPP;
1775 goto done;
1776 }
1777
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1779
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001780done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 return ret;
1783}
1784
Johan Hedberg123abc02014-07-10 12:09:07 +03001785static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1786{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001787 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03001788
1789 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1790
1791 if ((scan & SCAN_PAGE))
Marcel Holtmann238be782015-03-13 02:11:06 -07001792 conn_changed = !hci_dev_test_and_set_flag(hdev,
1793 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001794 else
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001795 conn_changed = hci_dev_test_and_clear_flag(hdev,
1796 HCI_CONNECTABLE);
Johan Hedberg123abc02014-07-10 12:09:07 +03001797
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001798 if ((scan & SCAN_INQUIRY)) {
Marcel Holtmann238be782015-03-13 02:11:06 -07001799 discov_changed = !hci_dev_test_and_set_flag(hdev,
1800 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001801 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07001802 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001803 discov_changed = hci_dev_test_and_clear_flag(hdev,
1804 HCI_DISCOVERABLE);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001805 }
1806
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001807 if (!hci_dev_test_flag(hdev, HCI_MGMT))
Johan Hedberg123abc02014-07-10 12:09:07 +03001808 return;
1809
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001810 if (conn_changed || discov_changed) {
1811 /* In case this was disabled through mgmt */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001812 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001813
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001814 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001815 mgmt_update_adv_data(hdev);
1816
Johan Hedberg123abc02014-07-10 12:09:07 +03001817 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03001818 }
Johan Hedberg123abc02014-07-10 12:09:07 +03001819}
1820
/* HCISET* ioctl dispatcher: apply one legacy device setting.
 *
 * Copies a struct hci_dev_req from userspace, validates the device
 * state, and then either issues the matching HCI request synchronously
 * or updates the setting directly on the hci_dev.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A user channel has exclusive access to the device */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	/* Unconfigured devices do not take legacy settings */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* All of these settings are BR/EDR only */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Direct update; no HCI request involved */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt is read as two consecutive __u16 values in
		 * memory order: the second one is the MTU, the first
		 * one the packet count.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1922
1923int hci_get_dev_list(void __user *arg)
1924{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001925 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 struct hci_dev_list_req *dl;
1927 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 int n = 0, size, err;
1929 __u16 dev_num;
1930
1931 if (get_user(dev_num, (__u16 __user *) arg))
1932 return -EFAULT;
1933
1934 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1935 return -EINVAL;
1936
1937 size = sizeof(*dl) + dev_num * sizeof(*dr);
1938
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001939 dl = kzalloc(size, GFP_KERNEL);
1940 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 return -ENOMEM;
1942
1943 dr = dl->dev_req;
1944
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001945 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001946 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001947 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001948
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001949 /* When the auto-off is configured it means the transport
1950 * is running, but in that case still indicate that the
1951 * device is actually down.
1952 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001953 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001954 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02001955
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001957 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001958
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 if (++n >= dev_num)
1960 break;
1961 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001962 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
1964 dl->dev_num = n;
1965 size = sizeof(*dl) + n * sizeof(*dr);
1966
1967 err = copy_to_user(arg, dl, size);
1968 kfree(dl);
1969
1970 return err ? -EFAULT : 0;
1971}
1972
1973int hci_get_dev_info(void __user *arg)
1974{
1975 struct hci_dev *hdev;
1976 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001977 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 int err = 0;
1979
1980 if (copy_from_user(&di, arg, sizeof(di)))
1981 return -EFAULT;
1982
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001983 hdev = hci_dev_get(di.dev_id);
1984 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 return -ENODEV;
1986
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001987 /* When the auto-off is configured it means the transport
1988 * is running, but in that case still indicate that the
1989 * device is actually down.
1990 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001991 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001992 flags = hdev->flags & ~BIT(HCI_UP);
1993 else
1994 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001995
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 strcpy(di.name, hdev->name);
1997 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001998 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02001999 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002001 if (lmp_bredr_capable(hdev)) {
2002 di.acl_mtu = hdev->acl_mtu;
2003 di.acl_pkts = hdev->acl_pkts;
2004 di.sco_mtu = hdev->sco_mtu;
2005 di.sco_pkts = hdev->sco_pkts;
2006 } else {
2007 di.acl_mtu = hdev->le_mtu;
2008 di.acl_pkts = hdev->le_pkts;
2009 di.sco_mtu = 0;
2010 di.sco_pkts = 0;
2011 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 di.link_policy = hdev->link_policy;
2013 di.link_mode = hdev->link_mode;
2014
2015 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2016 memcpy(&di.features, &hdev->features, sizeof(di.features));
2017
2018 if (copy_to_user(arg, &di, sizeof(di)))
2019 err = -EFAULT;
2020
2021 hci_dev_put(hdev);
2022
2023 return err;
2024}
2025
2026/* ---- Interface to HCI drivers ---- */
2027
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002028static int hci_rfkill_set_block(void *data, bool blocked)
2029{
2030 struct hci_dev *hdev = data;
2031
2032 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2033
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002034 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002035 return -EBUSY;
2036
Johan Hedberg5e130362013-09-13 08:58:17 +03002037 if (blocked) {
Marcel Holtmanna1536da2015-03-13 02:11:01 -07002038 hci_dev_set_flag(hdev, HCI_RFKILLED);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002039 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2040 !hci_dev_test_flag(hdev, HCI_CONFIG))
Johan Hedbergbf543032013-09-13 08:58:18 +03002041 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002042 } else {
Marcel Holtmanna358dc12015-03-13 02:11:02 -07002043 hci_dev_clear_flag(hdev, HCI_RFKILLED);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002044 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002045
2046 return 0;
2047}
2048
/* rfkill operations: only set_block is implemented; unblocking merely
 * clears HCI_RFKILLED and does not power the device back up.
 */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2052
/* Work item behind hdev->power_on: brings the controller up via
 * hci_dev_do_open(), re-checks error conditions that were deliberately
 * ignored during setup (rfkill, unconfigured, missing address) and
 * finalizes the SETUP/CONFIG state transitions towards mgmt.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		/* Report the failed power-on attempt to mgmt */
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		/* Schedule the automatic power-down unless userspace
		 * takes over the device before the timeout expires.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2113
2114static void hci_power_off(struct work_struct *work)
2115{
Johan Hedberg32435532011-11-07 22:16:04 +02002116 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002117 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002118
2119 BT_DBG("%s", hdev->name);
2120
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002121 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002122}
2123
/* Deferred handler for a controller hardware error event. Lets the
 * driver react first (or just logs the error code), then power-cycles
 * the device by closing and reopening it.
 */
static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	/* Give the driver a chance to handle the error itself */
	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	/* If the close failed, do not attempt to reopen the device */
	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
2141
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002142static void hci_discov_off(struct work_struct *work)
2143{
2144 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002145
2146 hdev = container_of(work, struct hci_dev, discov_off.work);
2147
2148 BT_DBG("%s", hdev->name);
2149
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002150 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002151}
2152
Johan Hedberg35f74982014-02-18 17:14:32 +02002153void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002154{
Johan Hedberg48210022013-01-27 00:31:28 +02002155 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002156
Johan Hedberg48210022013-01-27 00:31:28 +02002157 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2158 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002159 kfree(uuid);
2160 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002161}
2162
Johan Hedberg35f74982014-02-18 17:14:32 +02002163void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002164{
Johan Hedberg0378b592014-11-19 15:22:22 +02002165 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002166
Johan Hedberg0378b592014-11-19 15:22:22 +02002167 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2168 list_del_rcu(&key->list);
2169 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002170 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002171}
2172
Johan Hedberg35f74982014-02-18 17:14:32 +02002173void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002174{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002175 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002176
Johan Hedberg970d0f12014-11-13 14:37:47 +02002177 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2178 list_del_rcu(&k->list);
2179 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002180 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002181}
2182
Johan Hedberg970c4e42014-02-18 10:19:33 +02002183void hci_smp_irks_clear(struct hci_dev *hdev)
2184{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002185 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02002186
Johan Hedbergadae20c2014-11-13 14:37:48 +02002187 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2188 list_del_rcu(&k->list);
2189 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02002190 }
2191}
2192
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002193struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2194{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002195 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002196
Johan Hedberg0378b592014-11-19 15:22:22 +02002197 rcu_read_lock();
2198 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2199 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2200 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002201 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02002202 }
2203 }
2204 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002205
2206 return NULL;
2207}
2208
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302209static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002210 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002211{
2212 /* Legacy key */
2213 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302214 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002215
2216 /* Debug keys are insecure so don't store them persistently */
2217 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302218 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002219
2220 /* Changed combination key and there's no previous one */
2221 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302222 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002223
2224 /* Security mode 3 case */
2225 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302226 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002227
Johan Hedberge3befab2014-06-01 16:33:39 +03002228 /* BR/EDR key derived using SC from an LE link */
2229 if (conn->type == LE_LINK)
2230 return true;
2231
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002232 /* Neither local nor remote side had no-bonding as requirement */
2233 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302234 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002235
2236 /* Local side had dedicated bonding as requirement */
2237 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302238 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002239
2240 /* Remote side had dedicated bonding as requirement */
2241 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302242 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002243
2244 /* If none of the above criteria match, then don't store the key
2245 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302246 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002247}
2248
Johan Hedberge804d252014-07-16 11:42:28 +03002249static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08002250{
Johan Hedberge804d252014-07-16 11:42:28 +03002251 if (type == SMP_LTK)
2252 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002253
Johan Hedberge804d252014-07-16 11:42:28 +03002254 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002255}
2256
/* Look up an LE Long Term Key by identity address, address type and
 * HCI role.
 *
 * An entry matches when address and address type are equal and the key
 * is either a Secure Connections key (valid for both roles) or was
 * created for the requested @role.
 *
 * NOTE(review): the entry is returned after rcu_read_unlock();
 * callers presumably rely on hdev->lock to keep it alive — verify.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		/* SC keys work in both roles; legacy keys must match */
		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002276
/* Resolve a Resolvable Private Address to its Identity Resolving Key.
 *
 * First pass checks for an IRK whose cached RPA already matches the
 * given address. Second pass cryptographically tests the RPA against
 * every stored IRK value; on success the resolved RPA is cached in the
 * entry so the next lookup hits the fast path.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	/* Slow path: try to resolve the RPA with each IRK value */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2300
/* Look up an Identity Resolving Key by the device's identity address
 * and address type. Returns NULL when no IRK is stored or when the
 * address cannot be an identity address.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random; a static
	 * random address has its two most significant bits set.
	 */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2322
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * If a key for the address already exists it is updated in place,
 * otherwise a new entry is allocated and added to hdev->link_keys.
 * When @persistent is non-NULL it is set to whether the key should be
 * stored permanently (see hci_persistent_key()).
 *
 * Returns the stored key, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the old key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2369
/* Store (or update) an LE Long Term Key.
 *
 * A key matching the address, address type and role (derived from
 * @type) is updated in place; otherwise a new entry is allocated and
 * added to hdev->long_term_keys. Returns the stored key, or NULL on
 * allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
2398
/* Store (or update) an Identity Resolving Key for the given identity
 * address. The IRK value and the last known RPA are refreshed even for
 * an existing entry. Returns the stored IRK, or NULL on allocation
 * failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
2421
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002422int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2423{
2424 struct link_key *key;
2425
2426 key = hci_find_link_key(hdev, bdaddr);
2427 if (!key)
2428 return -ENOENT;
2429
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002430 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002431
Johan Hedberg0378b592014-11-19 15:22:22 +02002432 list_del_rcu(&key->list);
2433 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002434
2435 return 0;
2436}
2437
Johan Hedberge0b2b272014-02-18 17:14:31 +02002438int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002439{
Johan Hedberg970d0f12014-11-13 14:37:47 +02002440 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002441 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002442
Johan Hedberg970d0f12014-11-13 14:37:47 +02002443 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02002444 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002445 continue;
2446
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002447 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002448
Johan Hedberg970d0f12014-11-13 14:37:47 +02002449 list_del_rcu(&k->list);
2450 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002451 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002452 }
2453
Johan Hedbergc51ffa02014-02-18 17:14:33 +02002454 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002455}
2456
Johan Hedberga7ec7332014-02-18 17:14:35 +02002457void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2458{
Johan Hedbergadae20c2014-11-13 14:37:48 +02002459 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02002460
Johan Hedbergadae20c2014-11-13 14:37:48 +02002461 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02002462 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2463 continue;
2464
2465 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2466
Johan Hedbergadae20c2014-11-13 14:37:48 +02002467 list_del_rcu(&k->list);
2468 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02002469 }
2470}
2471
/* Check whether a device is paired, i.e. whether any key material is
 * stored for it: a link key for BR/EDR addresses, or an LTK for LE
 * addresses (after translating a known RPA to its identity address).
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* If an IRK is known, look up keys under the identity address
	 * instead of the (possibly random) address given by the caller.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2507
/* HCI command timer function: fires when the controller failed to
 * respond to a command in time. Logs the stuck opcode (when known),
 * then resets the command credit count and kicks the command work so
 * queued commands can proceed.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow the next command to be sent despite the missing response */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2526
Szymon Janc2763eda2011-03-22 13:12:22 +01002527struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Johan Hedberg6928a922014-10-26 20:46:09 +01002528 bdaddr_t *bdaddr, u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002529{
2530 struct oob_data *data;
2531
Johan Hedberg6928a922014-10-26 20:46:09 +01002532 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2533 if (bacmp(bdaddr, &data->bdaddr) != 0)
2534 continue;
2535 if (data->bdaddr_type != bdaddr_type)
2536 continue;
2537 return data;
2538 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002539
2540 return NULL;
2541}
2542
Johan Hedberg6928a922014-10-26 20:46:09 +01002543int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2544 u8 bdaddr_type)
Szymon Janc2763eda2011-03-22 13:12:22 +01002545{
2546 struct oob_data *data;
2547
Johan Hedberg6928a922014-10-26 20:46:09 +01002548 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002549 if (!data)
2550 return -ENOENT;
2551
Johan Hedberg6928a922014-10-26 20:46:09 +01002552 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
Szymon Janc2763eda2011-03-22 13:12:22 +01002553
2554 list_del(&data->list);
2555 kfree(data);
2556
2557 return 0;
2558}
2559
Johan Hedberg35f74982014-02-18 17:14:32 +02002560void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01002561{
2562 struct oob_data *data, *n;
2563
2564 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2565 list_del(&data->list);
2566 kfree(data);
2567 }
Szymon Janc2763eda2011-03-22 13:12:22 +01002568}
2569
/* Store remote out-of-band pairing data for a device, creating an
 * entry if needed. Either hash/randomizer pair may be absent (NULL),
 * in which case the corresponding buffers are zeroed.
 *
 * data->present encodes which values are stored:
 *   0x00 = none, 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* P-192 values: store or clear, and set present accordingly */
	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	/* P-256 values: store or clear; when only P-192 is available
	 * mark present as 0x01 here.
	 */
	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2615
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002616struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002617 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002618{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002619 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002620
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002621 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002622 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002623 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002624 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002625
2626 return NULL;
2627}
2628
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002629void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002630{
2631 struct list_head *p, *n;
2632
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002633 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002634 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002635
2636 list_del(p);
2637 kfree(b);
2638 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03002639}
2640
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002641int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002642{
2643 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002644
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002645 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03002646 return -EBADF;
2647
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002648 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03002649 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002650
Johan Hedberg27f70f32014-07-21 10:50:06 +03002651 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002652 if (!entry)
2653 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002654
2655 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07002656 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002657
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002658 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002659
2660 return 0;
2661}
2662
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002663int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002664{
2665 struct bdaddr_list *entry;
2666
Johan Hedberg35f74982014-02-18 17:14:32 +02002667 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002668 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02002669 return 0;
2670 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002671
Johan Hedbergdcc36c12014-07-09 12:59:13 +03002672 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08002673 if (!entry)
2674 return -ENOENT;
2675
2676 list_del(&entry->list);
2677 kfree(entry);
2678
2679 return 0;
2680}
2681
Andre Guedes15819a72014-02-03 13:56:18 -03002682/* This function requires the caller holds hdev->lock */
2683struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2684 bdaddr_t *addr, u8 addr_type)
2685{
2686 struct hci_conn_params *params;
2687
Johan Hedberg738f6182014-07-03 19:33:51 +03002688 /* The conn params list only contains identity addresses */
2689 if (!hci_is_identity_address(addr, addr_type))
2690 return NULL;
2691
Andre Guedes15819a72014-02-03 13:56:18 -03002692 list_for_each_entry(params, &hdev->le_conn_params, list) {
2693 if (bacmp(&params->addr, addr) == 0 &&
2694 params->addr_type == addr_type) {
2695 return params;
2696 }
2697 }
2698
2699 return NULL;
2700}
2701
2702/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03002703struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2704 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03002705{
Johan Hedberg912b42e2014-07-03 19:33:49 +03002706 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03002707
Johan Hedberg738f6182014-07-03 19:33:51 +03002708 /* The list only contains identity addresses */
2709 if (!hci_is_identity_address(addr, addr_type))
2710 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002711
Johan Hedberg501f8822014-07-04 12:37:26 +03002712 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03002713 if (bacmp(&param->addr, addr) == 0 &&
2714 param->addr_type == addr_type)
2715 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02002716 }
2717
2718 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03002719}
2720
/* This function requires the caller holds hdev->lock.
 *
 * Return the connection parameters for the given identity address,
 * creating a new entry (initialized from the device-wide defaults and
 * with auto-connect disabled) when none exists yet. Returns NULL for
 * a non-identity address or on allocation failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	/* Reuse an existing entry when one is already stored */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	/* Seed the per-device defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
2756
/* Unlink a connection-parameters entry from both the params list and
 * any pending action list, release the connection reference it may
 * hold, and free it.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	/* Drop the reference taken when the entry was attached to a
	 * connection.
	 */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
2768
/* This function requires the caller holds hdev->lock.
 *
 * Delete the stored connection parameters for the given identity
 * address (no-op if none exist) and refresh the background scan since
 * the set of devices to scan for may have changed.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
2784
/* This function requires the caller holds hdev->lock.
 *
 * Remove all connection-parameter entries whose auto-connect policy is
 * disabled. Entries are freed directly (not via hci_conn_params_free());
 * presumably disabled entries are never linked on an action list and
 * hold no connection reference — verify.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
2799
2800/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03002801void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03002802{
2803 struct hci_conn_params *params, *tmp;
2804
Johan Hedbergf6c63242014-08-15 21:06:59 +03002805 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2806 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03002807
Johan Hedberga2f41a82014-07-04 12:37:19 +03002808 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02002809
Andre Guedes15819a72014-02-03 13:56:18 -03002810 BT_DBG("All LE connection parameters were removed");
2811}
2812
Marcel Holtmann1904a852015-01-11 13:50:44 -08002813static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002814{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002815 if (status) {
2816 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002817
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002818 hci_dev_lock(hdev);
2819 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2820 hci_dev_unlock(hdev);
2821 return;
2822 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002823}
2824
/* Completion callback for the LE-scan-disable request. Depending on the
 * discovery type this either finishes discovery (LE-only) or hands over
 * to a BR/EDR inquiry (interleaved discovery). Runs with hdev->lock
 * taken where discovery state is changed.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	/* Scan is no longer running; clear the recorded start time so the
	 * restart path knows there is nothing to resume.
	 */
	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: disabling the scan ends it. */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags))
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			/* Controller cannot scan and inquire at the same
			 * time, so start the BR/EDR inquiry phase now.
			 */
			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2885
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002886static void le_scan_disable_work(struct work_struct *work)
2887{
2888 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002889 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002890 struct hci_request req;
2891 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002892
2893 BT_DBG("%s", hdev->name);
2894
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08002895 cancel_delayed_work_sync(&hdev->le_scan_restart);
2896
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002897 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002898
Andre Guedesb1efcc22014-02-26 20:21:40 -03002899 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002900
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002901 err = hci_req_run(&req, le_scan_disable_work_complete);
2902 if (err)
2903 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002904}
2905
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08002906static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2907 u16 opcode)
2908{
2909 unsigned long timeout, duration, scan_start, now;
2910
2911 BT_DBG("%s", hdev->name);
2912
2913 if (status) {
2914 BT_ERR("Failed to restart LE scan: status %d", status);
2915 return;
2916 }
2917
2918 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2919 !hdev->discovery.scan_start)
2920 return;
2921
2922 /* When the scan was started, hdev->le_scan_disable has been queued
2923 * after duration from scan_start. During scan restart this job
2924 * has been canceled, and we need to queue it again after proper
2925 * timeout, to make sure that scan does not run indefinitely.
2926 */
2927 duration = hdev->discovery.scan_duration;
2928 scan_start = hdev->discovery.scan_start;
2929 now = jiffies;
2930 if (now - scan_start <= duration) {
2931 int elapsed;
2932
2933 if (now >= scan_start)
2934 elapsed = now - scan_start;
2935 else
2936 elapsed = ULONG_MAX - scan_start + now;
2937
2938 timeout = duration - elapsed;
2939 } else {
2940 timeout = 0;
2941 }
2942 queue_delayed_work(hdev->workqueue,
2943 &hdev->le_scan_disable, timeout);
2944}
2945
2946static void le_scan_restart_work(struct work_struct *work)
2947{
2948 struct hci_dev *hdev = container_of(work, struct hci_dev,
2949 le_scan_restart.work);
2950 struct hci_request req;
2951 struct hci_cp_le_set_scan_enable cp;
2952 int err;
2953
2954 BT_DBG("%s", hdev->name);
2955
2956 /* If controller is not scanning we are done. */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002957 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Jakub Pawlowski2d28cfe2015-02-01 23:07:54 -08002958 return;
2959
2960 hci_req_init(&req, hdev);
2961
2962 hci_req_add_le_scan_disable(&req);
2963
2964 memset(&cp, 0, sizeof(cp));
2965 cp.enable = LE_SCAN_ENABLE;
2966 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2967 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2968
2969 err = hci_req_run(&req, le_scan_restart_work_complete);
2970 if (err)
2971 BT_ERR("Restart LE scan request failed: err %d", err);
2972}
2973
Johan Hedberga1f4c312014-02-27 14:05:41 +02002974/* Copy the Identity Address of the controller.
2975 *
2976 * If the controller has a public BD_ADDR, then by default use that one.
2977 * If this is a LE only controller without a public address, default to
2978 * the static random address.
2979 *
2980 * For debugging purposes it is possible to force controllers with a
2981 * public address to use the static random address instead.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002982 *
2983 * In case BR/EDR has been disabled on a dual-mode controller and
2984 * userspace has configured a static address, then that address
2985 * becomes the identity address instead of the public BR/EDR address.
Johan Hedberga1f4c312014-02-27 14:05:41 +02002986 */
2987void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2988 u8 *bdaddr_type)
2989{
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07002990 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002991 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07002992 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01002993 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberga1f4c312014-02-27 14:05:41 +02002994 bacpy(bdaddr, &hdev->static_addr);
2995 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2996 } else {
2997 bacpy(bdaddr, &hdev->bdaddr);
2998 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2999 }
3000}
3001
/* Alloc HCI device.
 *
 * Allocates a zeroed hci_dev and fills in default packet types, default
 * BR/EDR and LE parameters, list heads, work items and queues. Returns
 * the new device or NULL on allocation failure. The caller registers it
 * with hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR capabilities and identity defaults. */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff-mode interval bounds (presumably in 0.625 ms baseband
	 * slots like other BR/EDR intervals — confirm against the HCI
	 * specification before relying on the unit).
	 */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE advertising, scanning and connection parameters.
	 * The raw values are controller units as defined by the HCI
	 * specification for the corresponding commands.
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	/* Privacy and discovery timing defaults. */
	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists: keys, filters and LE connection state. */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Immediate work items. */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	/* Delayed work items (timers). */
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	adv_info_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3087
/* Free HCI device.
 *
 * Drops the device reference taken at allocation; the actual memory is
 * released by the device core's release callback once the last
 * reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3095
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096/* Register HCI device */
3097int hci_register_dev(struct hci_dev *hdev)
3098{
David Herrmannb1b813d2012-04-22 14:39:58 +02003099 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100
Marcel Holtmann74292d52014-07-06 15:50:27 +02003101 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102 return -EINVAL;
3103
Mat Martineau08add512011-11-02 16:18:36 -07003104 /* Do not allow HCI_AMP devices to register at index 0,
3105 * so the index can be used as the AMP controller ID.
3106 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003107 switch (hdev->dev_type) {
3108 case HCI_BREDR:
3109 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3110 break;
3111 case HCI_AMP:
3112 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3113 break;
3114 default:
3115 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003116 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003117
Sasha Levin3df92b32012-05-27 22:36:56 +02003118 if (id < 0)
3119 return id;
3120
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121 sprintf(hdev->name, "hci%d", id);
3122 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003123
3124 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3125
Kees Cookd8537542013-07-03 15:04:57 -07003126 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3127 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003128 if (!hdev->workqueue) {
3129 error = -ENOMEM;
3130 goto err;
3131 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003132
Kees Cookd8537542013-07-03 15:04:57 -07003133 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3134 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003135 if (!hdev->req_workqueue) {
3136 destroy_workqueue(hdev->workqueue);
3137 error = -ENOMEM;
3138 goto err;
3139 }
3140
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003141 if (!IS_ERR_OR_NULL(bt_debugfs))
3142 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3143
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003144 dev_set_name(&hdev->dev, "%s", hdev->name);
3145
3146 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003147 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03003148 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003150 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003151 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3152 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003153 if (hdev->rfkill) {
3154 if (rfkill_register(hdev->rfkill) < 0) {
3155 rfkill_destroy(hdev->rfkill);
3156 hdev->rfkill = NULL;
3157 }
3158 }
3159
Johan Hedberg5e130362013-09-13 08:58:17 +03003160 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003161 hci_dev_set_flag(hdev, HCI_RFKILLED);
Johan Hedberg5e130362013-09-13 08:58:17 +03003162
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003163 hci_dev_set_flag(hdev, HCI_SETUP);
3164 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003165
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003166 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003167 /* Assume BR/EDR support until proven otherwise (such as
3168 * through reading supported features during init.
3169 */
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003170 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
Johan Hedberg56f87902013-10-02 13:43:13 +03003171 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003172
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003173 write_lock(&hci_dev_list_lock);
3174 list_add(&hdev->list, &hci_dev_list);
3175 write_unlock(&hci_dev_list_lock);
3176
Marcel Holtmann4a964402014-07-02 19:10:33 +02003177 /* Devices that are marked for raw-only usage are unconfigured
3178 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003179 */
3180 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmanna1536da2015-03-13 02:11:01 -07003181 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003182
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003184 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185
Johan Hedberg19202572013-01-14 22:33:51 +02003186 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003187
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003189
David Herrmann33ca9542011-10-08 14:58:49 +02003190err_wqueue:
3191 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003192 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003193err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003194 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003195
David Herrmann33ca9542011-10-08 14:58:49 +02003196 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003197}
3198EXPORT_SYMBOL(hci_register_dev);
3199
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): unlinks the device, shuts it down,
 * removes its sysfs/debugfs presence, clears all per-device lists and
 * drops the registration reference. The teardown order matters; see the
 * inline comments.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Cache the index now: after hci_dev_put() below, hdev must not
	 * be touched anymore.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	/* Only announce the removal via mgmt if the device finished its
	 * setup/config phase.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all per-device state under hdev->lock. */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3263
/* Suspend HCI device.
 *
 * Only broadcasts the HCI_DEV_SUSPEND notification to registered
 * listeners; no device state is changed here. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3271
/* Resume HCI device.
 *
 * Only broadcasts the HCI_DEV_RESUME notification to registered
 * listeners; no device state is changed here. Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3279
Marcel Holtmann75e05692014-11-02 08:15:38 +01003280/* Reset HCI device */
3281int hci_reset_dev(struct hci_dev *hdev)
3282{
3283 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3284 struct sk_buff *skb;
3285
3286 skb = bt_skb_alloc(3, GFP_ATOMIC);
3287 if (!skb)
3288 return -ENOMEM;
3289
3290 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3291 memcpy(skb_put(skb, 3), hw_err, 3);
3292
3293 /* Send Hardware Error to upper stack */
3294 return hci_recv_frame(hdev, skb);
3295}
3296EXPORT_SYMBOL(hci_reset_dev);
3297
Marcel Holtmann76bca882009-11-18 00:40:39 +01003298/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003299int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003300{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003301 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003302 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003303 kfree_skb(skb);
3304 return -ENXIO;
3305 }
3306
Jorrit Schippersd82603c2012-12-27 17:33:02 +01003307 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01003308 bt_cb(skb)->incoming = 1;
3309
3310 /* Time stamp */
3311 __net_timestamp(skb);
3312
Marcel Holtmann76bca882009-11-18 00:40:39 +01003313 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003314 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003315
Marcel Holtmann76bca882009-11-18 00:40:39 +01003316 return 0;
3317}
3318EXPORT_SYMBOL(hci_recv_frame);
3319
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320/* ---- Interface to upper protocols ---- */
3321
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322int hci_register_cb(struct hci_cb *cb)
3323{
3324 BT_DBG("%p name %s", cb, cb->name);
3325
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003326 mutex_lock(&hci_cb_list_lock);
Johan Hedberg00629e02015-02-18 14:53:54 +02003327 list_add_tail(&cb->list, &hci_cb_list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003328 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329
3330 return 0;
3331}
3332EXPORT_SYMBOL(hci_register_cb);
3333
3334int hci_unregister_cb(struct hci_cb *cb)
3335{
3336 BT_DBG("%p name %s", cb, cb->name);
3337
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003338 mutex_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339 list_del(&cb->list);
Johan Hedbergfba7ecf2015-02-18 14:53:55 +02003340 mutex_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341
3342 return 0;
3343}
3344EXPORT_SYMBOL(hci_unregister_cb);
3345
Marcel Holtmann51086992013-10-10 14:54:19 -07003346static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003348 int err;
3349
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003350 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003352 /* Time stamp */
3353 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003355 /* Send copy to monitor */
3356 hci_send_to_monitor(hdev, skb);
3357
3358 if (atomic_read(&hdev->promisc)) {
3359 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003360 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361 }
3362
3363 /* Get rid of skb owner, prior to sending to the driver. */
3364 skb_orphan(skb);
3365
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02003366 err = hdev->send(hdev, skb);
3367 if (err < 0) {
3368 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3369 kfree_skb(skb);
3370 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371}
3372
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003373/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03003374int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3375 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02003376{
3377 struct sk_buff *skb;
3378
3379 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3380
3381 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3382 if (!skb) {
3383 BT_ERR("%s no memory for command", hdev->name);
3384 return -ENOMEM;
3385 }
3386
Stephen Hemminger49c922b2014-10-27 21:12:20 -07003387 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02003388 * single-command requests.
3389 */
Johan Hedbergdb6e3e82015-03-30 23:21:02 +03003390 bt_cb(skb)->req.start = true;
Johan Hedberg11714b32013-03-05 20:37:47 +02003391
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003393 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394
3395 return 0;
3396}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003397
3398/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003399void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003400{
3401 struct hci_command_hdr *hdr;
3402
3403 if (!hdev->sent_cmd)
3404 return NULL;
3405
3406 hdr = (void *) hdev->sent_cmd->data;
3407
Marcel Holtmanna9de9242007-10-20 13:33:56 +02003408 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 return NULL;
3410
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003411 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412
3413 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3414}
3415
/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to the skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	/* Capture the payload length before skb_push() grows the skb by
	 * the header size.
	 */
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Handle and packet-boundary/broadcast flags share one 16-bit
	 * little-endian field.
	 */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3428
/* Add ACL headers to an skb (and any frag_list fragments) and queue
 * everything on the given transmit queue. The first fragment keeps the
 * caller's flags; continuation fragments are re-flagged ACL_CONT. All
 * fragments of one buffer are queued atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict len to the head fragment; the tail fragments are
	 * handled individually via frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* AMP controllers address by channel handle, BR/EDR by
	 * connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as
		 * its own ACL packet.
		 */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments never carry the start flag. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3490
3491void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3492{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003493 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003494
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003495 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003496
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03003497 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003499 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501
3502/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03003503void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504{
3505 struct hci_dev *hdev = conn->hdev;
3506 struct hci_sco_hdr hdr;
3507
3508 BT_DBG("%s len %d", hdev->name, skb->len);
3509
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003510 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003511 hdr.dlen = skb->len;
3512
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03003513 skb_push(skb, HCI_SCO_HDR_SIZE);
3514 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07003515 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003516
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003517 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01003518
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003520 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003522
3523/* ---- HCI TX task (outgoing data) ---- */
3524
3525/* HCI Connection scheduler */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003526static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3527 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528{
3529 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003530 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003531 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003533 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003535
3536 rcu_read_lock();
3537
3538 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003539 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003541
3542 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3543 continue;
3544
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545 num++;
3546
3547 if (c->sent < min) {
3548 min = c->sent;
3549 conn = c;
3550 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003551
3552 if (hci_conn_num(hdev, type) == num)
3553 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554 }
3555
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003556 rcu_read_unlock();
3557
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003559 int cnt, q;
3560
3561 switch (conn->type) {
3562 case ACL_LINK:
3563 cnt = hdev->acl_cnt;
3564 break;
3565 case SCO_LINK:
3566 case ESCO_LINK:
3567 cnt = hdev->sco_cnt;
3568 break;
3569 case LE_LINK:
3570 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3571 break;
3572 default:
3573 cnt = 0;
3574 BT_ERR("Unknown link type");
3575 }
3576
3577 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578 *quote = q ? q : 1;
3579 } else
3580 *quote = 0;
3581
3582 BT_DBG("conn %p quote %d", conn, *quote);
3583 return conn;
3584}
3585
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003586static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587{
3588 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003589 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590
Ville Tervobae1f5d92011-02-10 22:38:53 -03003591 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003593 rcu_read_lock();
3594
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003596 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003597 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003598 BT_ERR("%s killing stalled connection %pMR",
3599 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003600 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601 }
3602 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003603
3604 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605}
3606
/* Channel scheduler for ACL/AMP/LE links.
 *
 * Scans every connection of the given type and, among the channels whose
 * head packet carries the highest priority seen so far, picks the one on
 * the connection with the least data outstanding. *quote is set to that
 * channel's fair share of the matching controller credit pool.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Channels below the current best priority lose */
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts selection */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* At equal priority the least-sent connection wins */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once all connections of this type were visited */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the credit pool matching the winning link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share per contending channel, minimum one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3688
/* Priority aging pass, run after a scheduling round that sent data.
 *
 * Channels that transmitted this round just get their per-round counter
 * reset; channels that were starved get their head packet promoted to
 * HCI_PRIO_MAX - 1 so low-priority traffic cannot be starved forever.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: reset and skip */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) promotion priority */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop once all connections of this type were visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3738
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003739static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3740{
3741 /* Calculate count of blocks used by this packet */
3742 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3743}
3744
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003745static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746{
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003747 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748 /* ACL tx timeout must be longer than maximum
3749 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003750 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003751 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003752 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003754}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755
/* Packet-based ACL scheduler: while the controller has free ACL buffer
 * credits, pick the best channel (hci_chan_sent) and drain packets of
 * non-decreasing priority from it up to its quota. If anything was sent,
 * age the priorities of the channels that were starved this round.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek confirmed the packet; now actually take it */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Credits were consumed, so something was sent: age priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3793
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003794static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003795{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003796 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003797 struct hci_chan *chan;
3798 struct sk_buff *skb;
3799 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003800 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003801
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003802 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003803
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003804 BT_DBG("%s", hdev->name);
3805
3806 if (hdev->dev_type == HCI_AMP)
3807 type = AMP_LINK;
3808 else
3809 type = ACL_LINK;
3810
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003811 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003812 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003813 u32 priority = (skb_peek(&chan->data_q))->priority;
3814 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3815 int blocks;
3816
3817 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003818 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003819
3820 /* Stop if priority has changed */
3821 if (skb->priority < priority)
3822 break;
3823
3824 skb = skb_dequeue(&chan->data_q);
3825
3826 blocks = __get_blocks(hdev, skb);
3827 if (blocks > hdev->block_cnt)
3828 return;
3829
3830 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003831 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003832
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003833 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003834 hdev->acl_last_tx = jiffies;
3835
3836 hdev->block_cnt -= blocks;
3837 quote -= blocks;
3838
3839 chan->sent += blocks;
3840 chan->conn->sent += blocks;
3841 }
3842 }
3843
3844 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003845 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003846}
3847
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003848static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003849{
3850 BT_DBG("%s", hdev->name);
3851
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003852 /* No ACL link over BR/EDR controller */
3853 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3854 return;
3855
3856 /* No AMP link over AMP controller */
3857 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003858 return;
3859
3860 switch (hdev->flow_ctl_mode) {
3861 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3862 hci_sched_acl_pkt(hdev);
3863 break;
3864
3865 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3866 hci_sched_acl_blk(hdev);
3867 break;
3868 }
3869}
3870
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003872static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873{
3874 struct hci_conn *conn;
3875 struct sk_buff *skb;
3876 int quote;
3877
3878 BT_DBG("%s", hdev->name);
3879
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003880 if (!hci_conn_num(hdev, SCO_LINK))
3881 return;
3882
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3884 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3885 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003886 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887
3888 conn->sent++;
3889 if (conn->sent == ~0)
3890 conn->sent = 0;
3891 }
3892 }
3893}
3894
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003895static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003896{
3897 struct hci_conn *conn;
3898 struct sk_buff *skb;
3899 int quote;
3900
3901 BT_DBG("%s", hdev->name);
3902
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003903 if (!hci_conn_num(hdev, ESCO_LINK))
3904 return;
3905
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003906 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3907 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003908 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3909 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003910 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003911
3912 conn->sent++;
3913 if (conn->sent == ~0)
3914 conn->sent = 0;
3915 }
3916 }
3917}
3918
/* LE data scheduler. Controllers without dedicated LE buffers
 * (le_pkts == 0) share the ACL credit pool, in which case acl_cnt is
 * drawn down instead of le_cnt.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick dedicated LE credits, or fall back to the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek confirmed the packet; now actually take it */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Credits were consumed, so something was sent: age priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3969
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003970static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003972 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973 struct sk_buff *skb;
3974
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003975 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003976 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003977
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003978 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Marcel Holtmann52de5992013-09-03 18:08:38 -07003979 /* Schedule queues and send stuff to HCI driver */
3980 hci_sched_acl(hdev);
3981 hci_sched_sco(hdev);
3982 hci_sched_esco(hdev);
3983 hci_sched_le(hdev);
3984 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003985
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986 /* Send next queued raw (unknown type) packet */
3987 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003988 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989}
3990
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003991/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992
3993/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003994static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995{
3996 struct hci_acl_hdr *hdr = (void *) skb->data;
3997 struct hci_conn *conn;
3998 __u16 handle, flags;
3999
4000 skb_pull(skb, HCI_ACL_HDR_SIZE);
4001
4002 handle = __le16_to_cpu(hdr->handle);
4003 flags = hci_flags(handle);
4004 handle = hci_handle(handle);
4005
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004006 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004007 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008
4009 hdev->stat.acl_rx++;
4010
4011 hci_dev_lock(hdev);
4012 conn = hci_conn_hash_lookup_handle(hdev, handle);
4013 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004014
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004016 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004017
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004019 l2cap_recv_acldata(conn, skb, flags);
4020 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004021 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004022 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004023 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024 }
4025
4026 kfree_skb(skb);
4027}
4028
4029/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03004030static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031{
4032 struct hci_sco_hdr *hdr = (void *) skb->data;
4033 struct hci_conn *conn;
4034 __u16 handle;
4035
4036 skb_pull(skb, HCI_SCO_HDR_SIZE);
4037
4038 handle = __le16_to_cpu(hdr->handle);
4039
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004040 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041
4042 hdev->stat.sco_rx++;
4043
4044 hci_dev_lock(hdev);
4045 conn = hci_conn_hash_lookup_handle(hdev, handle);
4046 hci_dev_unlock(hdev);
4047
4048 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004050 sco_recv_scodata(conn, skb);
4051 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004053 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004054 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055 }
4056
4057 kfree_skb(skb);
4058}
4059
Johan Hedberg9238f362013-03-05 20:37:48 +02004060static bool hci_req_is_complete(struct hci_dev *hdev)
4061{
4062 struct sk_buff *skb;
4063
4064 skb = skb_peek(&hdev->cmd_q);
4065 if (!skb)
4066 return true;
4067
Johan Hedbergdb6e3e82015-03-30 23:21:02 +03004068 return bt_cb(skb)->req.start;
Johan Hedberg9238f362013-03-05 20:37:48 +02004069}
4070
Johan Hedberg42c6b122013-03-05 20:37:49 +02004071static void hci_resend_last(struct hci_dev *hdev)
4072{
4073 struct hci_command_hdr *sent;
4074 struct sk_buff *skb;
4075 u16 opcode;
4076
4077 if (!hdev->sent_cmd)
4078 return;
4079
4080 sent = (void *) hdev->sent_cmd->data;
4081 opcode = __le16_to_cpu(sent->opcode);
4082 if (opcode == HCI_OP_RESET)
4083 return;
4084
4085 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4086 if (!skb)
4087 return;
4088
4089 skb_queue_head(&hdev->cmd_q, skb);
4090 queue_work(hdev->workqueue, &hdev->cmd_work);
4091}
4092
/* Resolve which completion callback should run for a completed HCI
 * command and discard the remainder of its request.
 *
 * @opcode, @status: opcode and status of the command that completed.
 * @req_complete, @req_complete_skb: output pointers; at most one is set,
 * taken either from hdev->sent_cmd (last command of the request) or from
 * the first queued command still belonging to the same request.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A start marker means the next request begins here:
		 * put it back and stop.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Last discarded command of the request ends up providing
		 * the callbacks.
		 */
		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4152
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004153static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004155 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004156 struct sk_buff *skb;
4157
4158 BT_DBG("%s", hdev->name);
4159
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004161 /* Send copy to monitor */
4162 hci_send_to_monitor(hdev, skb);
4163
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 if (atomic_read(&hdev->promisc)) {
4165 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004166 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167 }
4168
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07004169 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 kfree_skb(skb);
4171 continue;
4172 }
4173
4174 if (test_bit(HCI_INIT, &hdev->flags)) {
4175 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004176 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177 case HCI_ACLDATA_PKT:
4178 case HCI_SCODATA_PKT:
4179 kfree_skb(skb);
4180 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07004181 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182 }
4183
4184 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004185 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004187 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 hci_event_packet(hdev, skb);
4189 break;
4190
4191 case HCI_ACLDATA_PKT:
4192 BT_DBG("%s ACL data packet", hdev->name);
4193 hci_acldata_packet(hdev, skb);
4194 break;
4195
4196 case HCI_SCODATA_PKT:
4197 BT_DBG("%s SCO data packet", hdev->name);
4198 hci_scodata_packet(hdev, skb);
4199 break;
4200
4201 default:
4202 kfree_skb(skb);
4203 break;
4204 }
4205 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206}
4207
/* Command worker: if the controller has command credits, dequeue and
 * send the next HCI command. A clone of the sent skb is stashed in
 * hdev->sent_cmd for completion matching, and the command timeout timer
 * is (re)armed unless a reset is in progress.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}