/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR\n", &b->bdaddr);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

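/* Each bt_uuid entry stores the 128-bit UUID in little-endian byte
 * order, so the fields are extracted back to front and printed in the
 * canonical xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form.
 */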
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		data5 = get_unaligned_le32(uuid);
		data4 = get_unaligned_le16(uuid + 4);
		data3 = get_unaligned_le16(uuid + 6);
		data2 = get_unaligned_le16(uuid + 8);
		data1 = get_unaligned_le16(uuid + 10);
		data0 = get_unaligned_le32(uuid + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

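/* Take ownership of the last received event and, if it matches the
 * expected event (or the Command Complete for @opcode when @event is
 * zero), hand the skb to the caller. On any mismatch the skb is freed
 * and ERR_PTR(-ENODATA) is returned.
 */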
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

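/* Send a single HCI command and block until the controller answers
 * with the requested event, returning the resulting skb. The caller
 * owns the returned skb and must release it with kfree_skb().
 */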
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
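
/* Usage sketch (the vendor opcode 0xfc0f and the param variable are
 * hypothetical, for illustration only): a driver's setup callback can
 * issue a command synchronously and wait for its Command Complete
 * event like this:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc0f, sizeof(param), &param,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	kfree_skb(skb);
 */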

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

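/* Like __hci_req_sync() but takes the request lock itself and refuses
 * to run unless the device is up.
 */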
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

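/* Pick the richest inquiry mode the controller can handle: 0x02 for
 * extended inquiry results, 0x01 for inquiry results with RSSI and
 * 0x00 for standard inquiry results. The explicit manufacturer and
 * revision checks cover controllers known to support RSSI results
 * without advertising the corresponding feature bit.
 */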
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

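/* Run the staged controller initialization: stage 1 applies to all
 * controllers, stages 2 through 4 only to BR/EDR/LE ones, and the
 * debugfs entries are created once, during the initial setup phase.
 */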
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);

	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

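/* Re-sort @ie within the resolve list, which is kept ordered by signal
 * strength (strongest RSSI first) so that name resolution starts with
 * the closest devices.
 */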
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

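/* Add or refresh an inquiry cache entry for the reported device.
 * Returns true if the remote name is already known (no name request
 * is needed) and false otherwise, including when the entry could not
 * be allocated.
 */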
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

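/* ioctl(HCIINQUIRY) helper: runs an inquiry if the cache is stale (or
 * a flush was requested) and copies up to ir.num_rsp cached responses
 * back to user space.
 */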
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

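/* Power on the controller: run the transport open callback, the
 * driver setup routine (first time only) and the staged HCI init
 * sequence, rolling everything back if any stage fails.
 */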
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, clean up */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

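/* From user space the open path above is normally reached through the
 * HCIDEVUP ioctl on a raw HCI control socket (a sketch, with error
 * handling omitted):
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("Can't bring up hci0");
 */
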
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

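/* Example of driving one of the commands above from user space,
 * reusing the ctl socket from the earlier sketch (illustrative only):
 * enabling page and inquiry scan maps to HCISETSCAN with the scan mode
 * packed into dev_opt.
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */
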
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

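/* User-space sketch for the device list ioctl above (illustrative
 * only; error handling omitted).  dev_num is an input/output field:
 * it bounds the request and reports how many entries came back.
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (ioctl(ctl, HCIGETDEVLIST, (unsigned long) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n",
 *			       dr[i].dev_id, dr[i].dev_opt);
 *
 *	free(dl);
 */
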
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

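/* Decide whether a new link key may be stored persistently or must be
 * dropped when the connection goes down: legacy keys always persist,
 * debug keys never do, and combination keys persist only when both
 * sides requested some form of bonding or either side performed
 * dedicated bonding.
 */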
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side set no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side set dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side set dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently
	 */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

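/* Store an SMP short term key (STK) or long term key (LTK); anything
 * else is rejected.  Both kinds share hdev->long_term_keys, but only
 * genuine LTKs are announced to the management interface.
 */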
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

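/* When the LE scan window closes, discovery either stops (LE-only) or,
 * for interleaved discovery, hands over to a BR/EDR inquiry.  The two
 * request callbacks below implement that hand-over.
 */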
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

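/* A driver typically pairs the two helpers above with
 * hci_register_dev() roughly as follows (a sketch only; my_dev and the
 * my_* callbacks are hypothetical placeholders, see drivers/bluetooth
 * for real examples):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, my_dev);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
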
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

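/* Reassemble a fragmented HCI frame of the given packet type into
 * hdev->reassembly[index].  An skb sized for the expected header is
 * allocated first; once the header is complete the payload length is
 * read from it and the skb grows until a full frame can be handed to
 * hci_recv_frame().  Returns the number of input bytes not yet
 * consumed, or a negative error.
 */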
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

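/* Usage sketch (illustrative only): a driver that already knows the
 * packet type but receives the payload in arbitrary chunks feeds each
 * chunk as it arrives; partial packets are parked in
 * hdev->reassembly[] until a frame completes. 'data' and 'count' are
 * assumed driver state:
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, data, count);
 *	if (err < 0)
 *		BT_ERR("Corrupted ACL fragment");
 */
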
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

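/* Usage sketch (illustrative only): for H:4-style byte streams, where
 * every packet is prefixed with its type byte inside the stream
 * itself, the driver does not need to parse anything and simply
 * forwards whatever the transport delivered:
 *
 *	err = hci_recv_stream_fragment(hdev, data, count);
 *	if (err < 0)
 *		BT_ERR("Corrupted stream data");
 */
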
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

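/* Usage sketch (illustrative only): an upper protocol registers a
 * static struct hci_cb at module init and removes it on exit. Only
 * the name field is shown; the individual confirmation callbacks in
 * the structure are optional and omitted here for brevity:
 *
 *	static struct hci_cb sample_cb = {
 *		.name = "sample",
 *	};
 *
 *	hci_register_cb(&sample_cb);
 *	...
 *	hci_unregister_cb(&sample_cb);
 */
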
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

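/* Usage sketch (illustrative only): sending a stand-alone command
 * with a parameter block. HCI_OP_WRITE_LOCAL_NAME and its
 * hci_cp_write_local_name structure are assumed from the HCI header:
 *
 *	struct hci_cp_write_local_name cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	memcpy(cp.name, "example", 7);
 *	hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 */
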
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

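/* Usage sketch (illustrative only): building and running a request
 * with the API above. sample_complete() is a hypothetical
 * hci_req_complete_t handler; HCI_OP_RESET is used elsewhere in this
 * file:
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, sample_complete);
 */
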
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

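/* Usage sketch (illustrative only): event handlers use this to
 * recover the parameters of the command that a Command Complete
 * event refers to; the cp structure is the same assumption as in the
 * hci_send_cmd() sketch above:
 *
 *	struct hci_cp_write_local_name *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
 *	if (!cp)
 *		return;
 */
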
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

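/* Worked example (illustrative only, bit layout assumed from the HCI
 * header): hci_handle_pack() keeps the connection handle in the low
 * 12 bits and the packet boundary/broadcast flags in the top 4 bits.
 * With handle 0x002a and flags ACL_START (0x02), the packed header
 * field becomes (0x002a & 0x0fff) | (0x02 << 12) = 0x202a, sent on
 * the wire as little endian.
 */
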
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock the device here. Connections are always
	 * added and removed with the TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

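/* Worked example (illustrative only): with hdev->acl_cnt at 8 free
 * controller buffers and two busy ACL connections, hci_low_sent()
 * picks the connection with the smaller 'sent' count and grants it a
 * quote of 8 / 2 = 4 packets for this scheduling round; a quote never
 * drops below 1, so a busy link cannot be starved completely.
 */
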
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

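/* Worked example (illustrative only): with hdev->block_len at 64 and
 * an skb carrying a 340 byte ACL payload behind its 4 byte ACL header
 * (skb->len == 344), the packet occupies DIV_ROUND_UP(340, 64) = 6
 * controller buffer blocks.
 */
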
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR-based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}