/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

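/* Illustrative note (not part of the original file): entries created with
 * these fops appear under debugfs, which is typically mounted at
 * /sys/kernel/debug, giving a per-controller directory such as
 * bluetooth/hci0. Assuming a controller named hci0, the cache could be
 * inspected from a shell with something like:
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/inquiry_cache
 *
 * Each output line is one cached inquiry response, formatted by
 * inquiry_cache_show() above.
 */
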
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

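/* Illustrative sketch (not part of the original file): a driver setup
 * routine could use __hci_cmd_sync() to issue one command and block until
 * its Command Complete event arrives. The vendor opcode 0xfc0f below is a
 * made-up placeholder:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc0f, 0, NULL, HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 *
 * On success the returned skb holds the Command Complete parameters and
 * must be freed by the caller.
 */
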
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

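/* Note (added for clarity, not in the original file): the value returned
 * above is the Write Inquiry Mode parameter from the Bluetooth core
 * specification: 0x00 = standard inquiry results, 0x01 = inquiry results
 * with RSSI, 0x02 = inquiry results with RSSI or extended inquiry result
 * (EIR) format. The manufacturer/revision checks appear to work around
 * controllers that can deliver RSSI results without advertising the
 * corresponding LMP feature bit.
 */
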
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}

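/* Summary (added for clarity, not in the original file): controller
 * bring-up runs in up to four synchronous request stages. hci_init1_req
 * resets the controller and reads basic information, and is all that AMP
 * controllers get. For BR/EDR and LE controllers, hci_init2_req performs
 * per-transport setup plus the event mask, hci_init3_req sends commands
 * that depend on the features and supported-commands bits discovered in
 * the earlier stages, and hci_init4_req covers event mask page 2 and
 * Synchronization Train parameters. Debugfs entries are created only once,
 * during the initial HCI_SETUP phase.
 */
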
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

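/* Note (added for clarity, not in the original file): hci_dev_get()
 * returns the device with its reference count raised via hci_dev_hold(),
 * so every successful lookup must be paired with a hci_dev_put() once the
 * caller is done with the device; hci_inquiry() below follows this
 * pattern.
 */
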
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

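/* Note (added for clarity, not in the original file): discovery moves
 * through STOPPED -> STARTING -> FINDING -> (optionally) RESOLVING ->
 * STOPPING -> STOPPED. Userspace is only notified via mgmt_discovering():
 * once on entering FINDING, and again when returning to STOPPED from any
 * state other than STARTING.
 */
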
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

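/* Illustrative sketch (not part of the original file): userspace reaches
 * hci_inquiry() through the HCIINQUIRY ioctl on a raw HCI socket, i.e.
 * socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI). Assuming device index 0
 * and the general inquiry access code, a caller could look roughly like:
 *
 *	u8 buf[sizeof(struct hci_inquiry_req) +
 *	       255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *) buf;
 *
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	memcpy(ir->lap, (u8 []){ 0x33, 0x8b, 0x9e }, 3);  (GIAC LAP)
 *	ir->length  = 8;    (inquiry length, units of 1.28 seconds)
 *	ir->num_rsp = 0;    (0 means unlimited, capped at 255 above)
 *
 *	ioctl(sock, HCIINQUIRY, ir);
 *
 * On return, ir->num_rsp holds the number of struct inquiry_info records
 * that follow the request header in buf.
 */
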
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001237static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 int ret = 0;
1240
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 BT_DBG("%s %p", hdev->name, hdev);
1242
1243 hci_req_lock(hdev);
1244
Johan Hovold94324962012-03-15 14:48:41 +01001245 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1246 ret = -ENODEV;
1247 goto done;
1248 }
1249
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001250 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1251 /* Check for rfkill but allow the HCI setup stage to
1252 * proceed (which in itself doesn't cause any RF activity).
1253 */
1254 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1255 ret = -ERFKILL;
1256 goto done;
1257 }
1258
1259 /* Check for valid public address or a configured static
1260 * random adddress, but let the HCI setup proceed to
1261 * be able to determine if there is a public address
1262 * or not.
1263 *
1264 * This check is only valid for BR/EDR controllers
1265 * since AMP controllers do not have an address.
1266 */
1267 if (hdev->dev_type == HCI_BREDR &&
1268 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1269 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1270 ret = -EADDRNOTAVAIL;
1271 goto done;
1272 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001273 }
1274
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 if (test_bit(HCI_UP, &hdev->flags)) {
1276 ret = -EALREADY;
1277 goto done;
1278 }
1279
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 if (hdev->open(hdev)) {
1281 ret = -EIO;
1282 goto done;
1283 }
1284
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001285 atomic_set(&hdev->cmd_cnt, 1);
1286 set_bit(HCI_INIT, &hdev->flags);
1287
1288 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1289 ret = hdev->setup(hdev);
1290
1291 if (!ret) {
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001292 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1293 set_bit(HCI_RAW, &hdev->flags);
1294
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001295 if (!test_bit(HCI_RAW, &hdev->flags) &&
1296 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001297 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 }
1299
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001300 clear_bit(HCI_INIT, &hdev->flags);
1301
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 if (!ret) {
1303 hci_dev_hold(hdev);
1304 set_bit(HCI_UP, &hdev->flags);
1305 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001306 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001307 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07001308 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001309 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001310 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001311 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001312 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001313 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001315 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001316 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001317 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318
1319 skb_queue_purge(&hdev->cmd_q);
1320 skb_queue_purge(&hdev->rx_q);
1321
1322 if (hdev->flush)
1323 hdev->flush(hdev);
1324
1325 if (hdev->sent_cmd) {
1326 kfree_skb(hdev->sent_cmd);
1327 hdev->sent_cmd = NULL;
1328 }
1329
1330 hdev->close(hdev);
1331 hdev->flags = 0;
1332 }
1333
1334done:
1335 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 return ret;
1337}
1338
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001339/* ---- HCI ioctl helpers ---- */
1340
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

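/* Power down a controller: flush pending work, purge all queues,
 * optionally issue an HCI reset, and finally call the driver's
 * close() callback. Safe to call when the device is already down,
 * in which case it returns early.
 */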
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

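/* Handle the HCIGETDEVLIST ioctl: copy up to dev_num (id, flags)
 * pairs for the registered controllers back to user space.
 */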
1650int hci_get_dev_list(void __user *arg)
1651{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001652 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 struct hci_dev_list_req *dl;
1654 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 int n = 0, size, err;
1656 __u16 dev_num;
1657
1658 if (get_user(dev_num, (__u16 __user *) arg))
1659 return -EFAULT;
1660
1661 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1662 return -EINVAL;
1663
1664 size = sizeof(*dl) + dev_num * sizeof(*dr);
1665
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001666 dl = kzalloc(size, GFP_KERNEL);
1667 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 return -ENOMEM;
1669
1670 dr = dl->dev_req;
1671
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001672 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001673 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001674 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001675 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001676
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001677 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1678 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001679
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 (dr + n)->dev_id = hdev->id;
1681 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001682
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 if (++n >= dev_num)
1684 break;
1685 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001686 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687
1688 dl->dev_num = n;
1689 size = sizeof(*dl) + n * sizeof(*dr);
1690
1691 err = copy_to_user(arg, dl, size);
1692 kfree(dl);
1693
1694 return err ? -EFAULT : 0;
1695}
1696
1697int hci_get_dev_info(void __user *arg)
1698{
1699 struct hci_dev *hdev;
1700 struct hci_dev_info di;
1701 int err = 0;
1702
1703 if (copy_from_user(&di, arg, sizeof(di)))
1704 return -EFAULT;
1705
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001706 hdev = hci_dev_get(di.dev_id);
1707 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 return -ENODEV;
1709
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001710 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001711 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001712
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001713 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1714 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001715
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 strcpy(di.name, hdev->name);
1717 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001718 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 di.flags = hdev->flags;
1720 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001721 if (lmp_bredr_capable(hdev)) {
1722 di.acl_mtu = hdev->acl_mtu;
1723 di.acl_pkts = hdev->acl_pkts;
1724 di.sco_mtu = hdev->sco_mtu;
1725 di.sco_pkts = hdev->sco_pkts;
1726 } else {
1727 di.acl_mtu = hdev->le_mtu;
1728 di.acl_pkts = hdev->le_pkts;
1729 di.sco_mtu = 0;
1730 di.sco_pkts = 0;
1731 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 di.link_policy = hdev->link_policy;
1733 di.link_mode = hdev->link_mode;
1734
1735 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1736 memcpy(&di.features, &hdev->features, sizeof(di.features));
1737
1738 if (copy_to_user(arg, &di, sizeof(di)))
1739 err = -EFAULT;
1740
1741 hci_dev_put(hdev);
1742
1743 return err;
1744}
1745
1746/* ---- Interface to HCI drivers ---- */
1747
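/* rfkill callback: a block request powers the controller down
 * (unless it is still in setup), an unblock merely clears the
 * flag; the device is not powered back up automatically.
 */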
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

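/* Decide whether a link key should be stored persistently. Legacy
 * keys are always stored, debug keys never are, and for the
 * remaining cases the decision depends on the bonding requirements
 * of the two sides of the connection.
 */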
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Both local and remote sides requested some form of bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

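/* Store (or update) a BR/EDR link key for bdaddr and, for new keys,
 * report it via the management interface together with a hint on
 * whether it should be kept persistently.
 */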
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

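/* Called when the LE scan disable command completes. For LE-only
 * discovery this ends the discovery session; for interleaved
 * discovery it kicks off the BR/EDR inquiry phase.
 */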
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list.
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

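/* Reassemble a fragmented HCI packet of the given type into
 * hdev->reassembly[index]. Returns the number of input bytes left
 * unconsumed (>= 0) or a negative error; a completed frame is
 * handed to hci_recv_frame().
 */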
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

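/* Feed a driver-provided buffer of fragments of a single packet
 * type into the reassembly machinery; each packet type uses its
 * own reassembly slot (type - 1).
 */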
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

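/* Deliver a single frame to the driver: timestamp it, mirror it to
 * the monitor and (in promiscuous mode) to the sockets, then hand
 * it to the driver's send() callback.
 */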
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

2760int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2761{
2762 struct hci_dev *hdev = req->hdev;
2763 struct sk_buff *skb;
2764 unsigned long flags;
2765
2766 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2767
Andre Guedes5d73e032013-03-08 11:20:16 -03002768	/* If an error occurred during request building, remove all HCI
2769 * commands queued on the HCI request queue.
2770 */
2771 if (req->err) {
2772 skb_queue_purge(&req->cmd_q);
2773 return req->err;
2774 }
2775
Johan Hedberg3119ae92013-03-05 20:37:44 +02002776 /* Do not allow empty requests */
2777 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002778 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002779
2780 skb = skb_peek_tail(&req->cmd_q);
2781 bt_cb(skb)->req.complete = complete;
2782
2783 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2784 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2785 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2786
2787 queue_work(hdev->workqueue, &hdev->cmd_work);
2788
2789 return 0;
2790}
2791
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002792static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002793 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794{
2795 int len = HCI_COMMAND_HDR_SIZE + plen;
2796 struct hci_command_hdr *hdr;
2797 struct sk_buff *skb;
2798
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002800 if (!skb)
2801 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802
2803 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002804 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 hdr->plen = plen;
2806
2807 if (plen)
2808 memcpy(skb_put(skb, plen), param, plen);
2809
2810 BT_DBG("skb len %d", skb->len);
2811
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002812 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002813
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002814 return skb;
2815}
2816
2817/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002818int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2819 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002820{
2821 struct sk_buff *skb;
2822
2823 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2824
2825 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2826 if (!skb) {
2827 BT_ERR("%s no memory for command", hdev->name);
2828 return -ENOMEM;
2829 }
2830
Johan Hedberg11714b32013-03-05 20:37:47 +02002831	/* Stand-alone HCI commands must be flagged as
2832 * single-command requests.
2833 */
2834 bt_cb(skb)->req.start = true;
2835
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002837 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838
2839 return 0;
2840}
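
/* Illustrative sketch, not part of the original file: a stand-alone
 * command queued outside any request context. HCI_OP_RESET is chosen
 * only because it carries no parameters.
 */
static int my_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}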
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841
Johan Hedberg71c76a12013-03-05 20:37:46 +02002842/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002843void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2844 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002845{
2846 struct hci_dev *hdev = req->hdev;
2847 struct sk_buff *skb;
2848
2849 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2850
Andre Guedes34739c12013-03-08 11:20:18 -03002851	/* If an error occurred during request building, there is no point in
2852 * queueing the HCI command. We can simply return.
2853 */
2854 if (req->err)
2855 return;
2856
Johan Hedberg71c76a12013-03-05 20:37:46 +02002857 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2858 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002859 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2860 hdev->name, opcode);
2861 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002862 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002863 }
2864
2865 if (skb_queue_empty(&req->cmd_q))
2866 bt_cb(skb)->req.start = true;
2867
Johan Hedberg02350a72013-04-03 21:50:29 +03002868 bt_cb(skb)->req.event = event;
2869
Johan Hedberg71c76a12013-03-05 20:37:46 +02002870 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002871}
2872
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002873void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2874 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002875{
2876 hci_req_add_ev(req, opcode, plen, param, 0);
2877}
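
/* Illustrative sketch, not part of the original file: batching two
 * parameterless commands into one request so that a single callback
 * fires when the last one completes. my_req_complete() and
 * my_read_local_info() are hypothetical names; the opcodes are
 * standard HCI informational commands.
 */
static void my_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int my_read_local_info(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
	hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Fails with -ENODATA on an empty queue, or with req.err if
	 * any hci_req_add() above ran out of memory.
	 */
	return hci_req_run(&req, my_req_complete);
}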
2878
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002880void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881{
2882 struct hci_command_hdr *hdr;
2883
2884 if (!hdev->sent_cmd)
2885 return NULL;
2886
2887 hdr = (void *) hdev->sent_cmd->data;
2888
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002889 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 return NULL;
2891
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002892 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893
2894 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2895}
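
/* Illustrative sketch, not part of the original file: a command
 * status handler recovering the parameters of the command it is
 * completing, in the style of hci_event.c. The handler name is
 * hypothetical.
 */
static void my_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return; /* a different command was sent last */

	BT_DBG("%s status 0x%2.2x bdaddr %pMR", hdev->name, status,
	       &cp->bdaddr);
}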
2896
2897/* Send ACL data */
2898static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2899{
2900 struct hci_acl_hdr *hdr;
2901 int len = skb->len;
2902
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002903 skb_push(skb, HCI_ACL_HDR_SIZE);
2904 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002905 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002906 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2907 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908}
2909
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002910static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002911 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002913 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914 struct hci_dev *hdev = conn->hdev;
2915 struct sk_buff *list;
2916
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002917 skb->len = skb_headlen(skb);
2918 skb->data_len = 0;
2919
2920 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002921
2922 switch (hdev->dev_type) {
2923 case HCI_BREDR:
2924 hci_add_acl_hdr(skb, conn->handle, flags);
2925 break;
2926 case HCI_AMP:
2927 hci_add_acl_hdr(skb, chan->handle, flags);
2928 break;
2929 default:
2930 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2931 return;
2932 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002933
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002934 list = skb_shinfo(skb)->frag_list;
2935 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936		/* Non-fragmented */
2937 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2938
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002939 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 } else {
2941 /* Fragmented */
2942 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2943
2944 skb_shinfo(skb)->frag_list = NULL;
2945
2946 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002947 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002949 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002950
2951 flags &= ~ACL_START;
2952 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953 do {
2954 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002955
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002956 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002957 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958
2959 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2960
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002961 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962 } while (list);
2963
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002964 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002966}
2967
2968void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2969{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002970 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002971
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002972 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002973
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002974 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002976 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978
2979/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002980void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981{
2982 struct hci_dev *hdev = conn->hdev;
2983 struct hci_sco_hdr hdr;
2984
2985 BT_DBG("%s len %d", hdev->name, skb->len);
2986
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002987 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 hdr.dlen = skb->len;
2989
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002990 skb_push(skb, HCI_SCO_HDR_SIZE);
2991 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002992 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002994 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002995
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002997 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999
3000/* ---- HCI TX task (outgoing data) ---- */
3001
3002/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003003static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3004 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005{
3006 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003007 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02003008 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003010	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003012
3013 rcu_read_lock();
3014
3015 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02003016 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02003018
3019 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3020 continue;
3021
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022 num++;
3023
3024 if (c->sent < min) {
3025 min = c->sent;
3026 conn = c;
3027 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003028
3029 if (hci_conn_num(hdev, type) == num)
3030 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031 }
3032
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003033 rcu_read_unlock();
3034
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003036 int cnt, q;
3037
3038 switch (conn->type) {
3039 case ACL_LINK:
3040 cnt = hdev->acl_cnt;
3041 break;
3042 case SCO_LINK:
3043 case ESCO_LINK:
3044 cnt = hdev->sco_cnt;
3045 break;
3046 case LE_LINK:
3047 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3048 break;
3049 default:
3050 cnt = 0;
3051 BT_ERR("Unknown link type");
3052 }
3053
3054 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 *quote = q ? q : 1;
3056 } else
3057 *quote = 0;
3058
3059 BT_DBG("conn %p quote %d", conn, *quote);
3060 return conn;
3061}
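
/* Worked example of the quota above: with hdev->acl_cnt = 9 and three
 * ACL connections holding queued data, num = 3 and the connection with
 * the smallest ->sent count gets quote = 9 / 3 = 3 packets this round.
 * A zero quotient is rounded up to 1 so the chosen link always makes
 * progress.
 */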
3062
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003063static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064{
3065 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003066 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067
Ville Tervobae1f5d92011-02-10 22:38:53 -03003068 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003070 rcu_read_lock();
3071
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003073 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003074 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003075 BT_ERR("%s killing stalled connection %pMR",
3076 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003077 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078 }
3079 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003080
3081 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003082}
3083
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003084static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3085 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003086{
3087 struct hci_conn_hash *h = &hdev->conn_hash;
3088 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003089 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003090 struct hci_conn *conn;
3091 int cnt, q, conn_num = 0;
3092
3093 BT_DBG("%s", hdev->name);
3094
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003095 rcu_read_lock();
3096
3097 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003098 struct hci_chan *tmp;
3099
3100 if (conn->type != type)
3101 continue;
3102
3103 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3104 continue;
3105
3106 conn_num++;
3107
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003108 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003109 struct sk_buff *skb;
3110
3111 if (skb_queue_empty(&tmp->data_q))
3112 continue;
3113
3114 skb = skb_peek(&tmp->data_q);
3115 if (skb->priority < cur_prio)
3116 continue;
3117
3118 if (skb->priority > cur_prio) {
3119 num = 0;
3120 min = ~0;
3121 cur_prio = skb->priority;
3122 }
3123
3124 num++;
3125
3126 if (conn->sent < min) {
3127 min = conn->sent;
3128 chan = tmp;
3129 }
3130 }
3131
3132 if (hci_conn_num(hdev, type) == conn_num)
3133 break;
3134 }
3135
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003136 rcu_read_unlock();
3137
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003138 if (!chan)
3139 return NULL;
3140
3141 switch (chan->conn->type) {
3142 case ACL_LINK:
3143 cnt = hdev->acl_cnt;
3144 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003145 case AMP_LINK:
3146 cnt = hdev->block_cnt;
3147 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003148 case SCO_LINK:
3149 case ESCO_LINK:
3150 cnt = hdev->sco_cnt;
3151 break;
3152 case LE_LINK:
3153 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3154 break;
3155 default:
3156 cnt = 0;
3157 BT_ERR("Unknown link type");
3158 }
3159
3160 q = cnt / num;
3161 *quote = q ? q : 1;
3162 BT_DBG("chan %p quote %d", chan, *quote);
3163 return chan;
3164}
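
/* Unlike hci_low_sent() above, this scheduler is channel-aware: only
 * channels whose head skb carries the highest pending priority compete
 * in a given round, and among those the channel on the
 * least-recently-served connection wins.
 */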
3165
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003166static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3167{
3168 struct hci_conn_hash *h = &hdev->conn_hash;
3169 struct hci_conn *conn;
3170 int num = 0;
3171
3172 BT_DBG("%s", hdev->name);
3173
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003174 rcu_read_lock();
3175
3176 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003177 struct hci_chan *chan;
3178
3179 if (conn->type != type)
3180 continue;
3181
3182 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3183 continue;
3184
3185 num++;
3186
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003187 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003188 struct sk_buff *skb;
3189
3190 if (chan->sent) {
3191 chan->sent = 0;
3192 continue;
3193 }
3194
3195 if (skb_queue_empty(&chan->data_q))
3196 continue;
3197
3198 skb = skb_peek(&chan->data_q);
3199 if (skb->priority >= HCI_PRIO_MAX - 1)
3200 continue;
3201
3202 skb->priority = HCI_PRIO_MAX - 1;
3203
3204 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003205 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003206 }
3207
3208 if (hci_conn_num(hdev, type) == num)
3209 break;
3210 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003211
3212 rcu_read_unlock();
3213
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003214}
3215
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003216static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3217{
3218 /* Calculate count of blocks used by this packet */
3219 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3220}
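
/* Worked example: with hdev->block_len = 64, an skb of 4 + 100 bytes
 * (ACL header plus payload) charges DIV_ROUND_UP(100, 64) = 2 blocks
 * against hdev->block_cnt.
 */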
3221
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003222static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003223{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224 if (!test_bit(HCI_RAW, &hdev->flags)) {
3225		/* ACL tx timeout must be longer than the maximum
3226 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003227 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003228 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003229 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003231}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003233static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003234{
3235 unsigned int cnt = hdev->acl_cnt;
3236 struct hci_chan *chan;
3237 struct sk_buff *skb;
3238 int quote;
3239
3240 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003241
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003242 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003243 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003244 u32 priority = (skb_peek(&chan->data_q))->priority;
3245 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003246 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003247 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003248
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003249 /* Stop if priority has changed */
3250 if (skb->priority < priority)
3251 break;
3252
3253 skb = skb_dequeue(&chan->data_q);
3254
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003255 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003256 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003257
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003258 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003259 hdev->acl_last_tx = jiffies;
3260
3261 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003262 chan->sent++;
3263 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264 }
3265 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003266
3267 if (cnt != hdev->acl_cnt)
3268 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269}
3270
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003271static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003272{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003273 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003274 struct hci_chan *chan;
3275 struct sk_buff *skb;
3276 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003277 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003278
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003279 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003280
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003281 BT_DBG("%s", hdev->name);
3282
3283 if (hdev->dev_type == HCI_AMP)
3284 type = AMP_LINK;
3285 else
3286 type = ACL_LINK;
3287
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003288 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003289 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003290 u32 priority = (skb_peek(&chan->data_q))->priority;
3291 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3292 int blocks;
3293
3294 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003295 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003296
3297 /* Stop if priority has changed */
3298 if (skb->priority < priority)
3299 break;
3300
3301 skb = skb_dequeue(&chan->data_q);
3302
3303 blocks = __get_blocks(hdev, skb);
3304 if (blocks > hdev->block_cnt)
3305 return;
3306
3307 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003308 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003309
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003310 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003311 hdev->acl_last_tx = jiffies;
3312
3313 hdev->block_cnt -= blocks;
3314 quote -= blocks;
3315
3316 chan->sent += blocks;
3317 chan->conn->sent += blocks;
3318 }
3319 }
3320
3321 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003322 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003323}
3324
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003325static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003326{
3327 BT_DBG("%s", hdev->name);
3328
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003329 /* No ACL link over BR/EDR controller */
3330 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3331 return;
3332
3333 /* No AMP link over AMP controller */
3334 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003335 return;
3336
3337 switch (hdev->flow_ctl_mode) {
3338 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3339 hci_sched_acl_pkt(hdev);
3340 break;
3341
3342 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3343 hci_sched_acl_blk(hdev);
3344 break;
3345 }
3346}
3347
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003349static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350{
3351 struct hci_conn *conn;
3352 struct sk_buff *skb;
3353 int quote;
3354
3355 BT_DBG("%s", hdev->name);
3356
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003357 if (!hci_conn_num(hdev, SCO_LINK))
3358 return;
3359
Linus Torvalds1da177e2005-04-16 15:20:36 -07003360 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3361 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3362 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003363 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364
3365 conn->sent++;
3366 if (conn->sent == ~0)
3367 conn->sent = 0;
3368 }
3369 }
3370}
3371
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003372static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003373{
3374 struct hci_conn *conn;
3375 struct sk_buff *skb;
3376 int quote;
3377
3378 BT_DBG("%s", hdev->name);
3379
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003380 if (!hci_conn_num(hdev, ESCO_LINK))
3381 return;
3382
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003383 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3384 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003385 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3386 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003387 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003388
3389 conn->sent++;
3390 if (conn->sent == ~0)
3391 conn->sent = 0;
3392 }
3393 }
3394}
3395
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003396static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003397{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003398 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003399 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003400 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003401
3402 BT_DBG("%s", hdev->name);
3403
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003404 if (!hci_conn_num(hdev, LE_LINK))
3405 return;
3406
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003407 if (!test_bit(HCI_RAW, &hdev->flags)) {
3408		/* LE tx timeout must be longer than the maximum
3409 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003410 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003411 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003412 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003413 }
3414
3415 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003416 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003417 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003418 u32 priority = (skb_peek(&chan->data_q))->priority;
3419 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003420 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003421 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003422
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003423 /* Stop if priority has changed */
3424 if (skb->priority < priority)
3425 break;
3426
3427 skb = skb_dequeue(&chan->data_q);
3428
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003429 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003430 hdev->le_last_tx = jiffies;
3431
3432 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003433 chan->sent++;
3434 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003435 }
3436 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003437
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003438 if (hdev->le_pkts)
3439 hdev->le_cnt = cnt;
3440 else
3441 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003442
3443 if (cnt != tmp)
3444 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003445}
3446
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003447static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003448{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003449 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 struct sk_buff *skb;
3451
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003452 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003453 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454
Marcel Holtmann52de5992013-09-03 18:08:38 -07003455 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3456 /* Schedule queues and send stuff to HCI driver */
3457 hci_sched_acl(hdev);
3458 hci_sched_sco(hdev);
3459 hci_sched_esco(hdev);
3460 hci_sched_le(hdev);
3461 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003462
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 /* Send next queued raw (unknown type) packet */
3464 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003465 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466}
3467
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003468/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469
3470/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003471static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472{
3473 struct hci_acl_hdr *hdr = (void *) skb->data;
3474 struct hci_conn *conn;
3475 __u16 handle, flags;
3476
3477 skb_pull(skb, HCI_ACL_HDR_SIZE);
3478
3479 handle = __le16_to_cpu(hdr->handle);
3480 flags = hci_flags(handle);
3481 handle = hci_handle(handle);
3482
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003483 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003484 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485
3486 hdev->stat.acl_rx++;
3487
3488 hci_dev_lock(hdev);
3489 conn = hci_conn_hash_lookup_handle(hdev, handle);
3490 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003491
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003493 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003494
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003496 l2cap_recv_acldata(conn, skb, flags);
3497 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003498 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003499 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003500 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501 }
3502
3503 kfree_skb(skb);
3504}
3505
3506/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003507static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508{
3509 struct hci_sco_hdr *hdr = (void *) skb->data;
3510 struct hci_conn *conn;
3511 __u16 handle;
3512
3513 skb_pull(skb, HCI_SCO_HDR_SIZE);
3514
3515 handle = __le16_to_cpu(hdr->handle);
3516
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003517 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003518
3519 hdev->stat.sco_rx++;
3520
3521 hci_dev_lock(hdev);
3522 conn = hci_conn_hash_lookup_handle(hdev, handle);
3523 hci_dev_unlock(hdev);
3524
3525 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003527 sco_recv_scodata(conn, skb);
3528 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003530 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003531 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532 }
3533
3534 kfree_skb(skb);
3535}
3536
Johan Hedberg9238f362013-03-05 20:37:48 +02003537static bool hci_req_is_complete(struct hci_dev *hdev)
3538{
3539 struct sk_buff *skb;
3540
3541 skb = skb_peek(&hdev->cmd_q);
3542 if (!skb)
3543 return true;
3544
3545 return bt_cb(skb)->req.start;
3546}
3547
Johan Hedberg42c6b122013-03-05 20:37:49 +02003548static void hci_resend_last(struct hci_dev *hdev)
3549{
3550 struct hci_command_hdr *sent;
3551 struct sk_buff *skb;
3552 u16 opcode;
3553
3554 if (!hdev->sent_cmd)
3555 return;
3556
3557 sent = (void *) hdev->sent_cmd->data;
3558 opcode = __le16_to_cpu(sent->opcode);
3559 if (opcode == HCI_OP_RESET)
3560 return;
3561
3562 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3563 if (!skb)
3564 return;
3565
3566 skb_queue_head(&hdev->cmd_q, skb);
3567 queue_work(hdev->workqueue, &hdev->cmd_work);
3568}
3569
Johan Hedberg9238f362013-03-05 20:37:48 +02003570void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3571{
3572 hci_req_complete_t req_complete = NULL;
3573 struct sk_buff *skb;
3574 unsigned long flags;
3575
3576 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3577
Johan Hedberg42c6b122013-03-05 20:37:49 +02003578 /* If the completed command doesn't match the last one that was
3579 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003580 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003581 if (!hci_sent_cmd_data(hdev, opcode)) {
3582		/* Some CSR-based controllers generate a spontaneous
3583 * reset complete event during init and any pending
3584 * command will never be completed. In such a case we
3585 * need to resend whatever was the last sent
3586 * command.
3587 */
3588 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3589 hci_resend_last(hdev);
3590
Johan Hedberg9238f362013-03-05 20:37:48 +02003591 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003592 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003593
3594	/* If the command succeeded and there are still more commands in
3595	 * this request, the request is not yet complete.
3596 */
3597 if (!status && !hci_req_is_complete(hdev))
3598 return;
3599
3600	/* If this was the last command in a request, the complete
3601 * callback would be found in hdev->sent_cmd instead of the
3602 * command queue (hdev->cmd_q).
3603 */
3604 if (hdev->sent_cmd) {
3605 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003606
3607 if (req_complete) {
3608 /* We must set the complete callback to NULL to
3609 * avoid calling the callback more than once if
3610 * this function gets called again.
3611 */
3612 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3613
Johan Hedberg9238f362013-03-05 20:37:48 +02003614 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003615 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003616 }
3617
3618 /* Remove all pending commands belonging to this request */
3619 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3620 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3621 if (bt_cb(skb)->req.start) {
3622 __skb_queue_head(&hdev->cmd_q, skb);
3623 break;
3624 }
3625
3626 req_complete = bt_cb(skb)->req.complete;
3627 kfree_skb(skb);
3628 }
3629 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3630
3631call_complete:
3632 if (req_complete)
3633 req_complete(hdev, status);
3634}
3635
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003636static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003638 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639 struct sk_buff *skb;
3640
3641 BT_DBG("%s", hdev->name);
3642
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003644 /* Send copy to monitor */
3645 hci_send_to_monitor(hdev, skb);
3646
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647 if (atomic_read(&hdev->promisc)) {
3648 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003649 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650 }
3651
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003652 if (test_bit(HCI_RAW, &hdev->flags) ||
3653 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654 kfree_skb(skb);
3655 continue;
3656 }
3657
3658 if (test_bit(HCI_INIT, &hdev->flags)) {
3659			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003660 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003661 case HCI_ACLDATA_PKT:
3662 case HCI_SCODATA_PKT:
3663 kfree_skb(skb);
3664 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003665 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 }
3667
3668 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003669 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003671 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672 hci_event_packet(hdev, skb);
3673 break;
3674
3675 case HCI_ACLDATA_PKT:
3676 BT_DBG("%s ACL data packet", hdev->name);
3677 hci_acldata_packet(hdev, skb);
3678 break;
3679
3680 case HCI_SCODATA_PKT:
3681 BT_DBG("%s SCO data packet", hdev->name);
3682 hci_scodata_packet(hdev, skb);
3683 break;
3684
3685 default:
3686 kfree_skb(skb);
3687 break;
3688 }
3689 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690}
3691
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003692static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003694 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695 struct sk_buff *skb;
3696
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003697 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3698 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003701 if (atomic_read(&hdev->cmd_cnt)) {
3702 skb = skb_dequeue(&hdev->cmd_q);
3703 if (!skb)
3704 return;
3705
Wei Yongjun7585b972009-02-25 18:29:52 +08003706 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003708 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003709 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003711 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003712 if (test_bit(HCI_RESET, &hdev->flags))
3713 del_timer(&hdev->cmd_timer);
3714 else
3715 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003716 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 } else {
3718 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003719 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720 }
3721 }
3722}